Diffstat (limited to 'jstests/concurrency/fsm_workloads')
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_base.js | 22
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_group_external.js | 88
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_match.js | 50
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort.js | 50
-rw-r--r--  jstests/concurrency/fsm_workloads/agg_sort_external.js | 81
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_role.js | 18
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_create_user.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_drop_role.js | 21
-rw-r--r--  jstests/concurrency/fsm_workloads/auth_drop_user.js | 13
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod.js | 19
-rw-r--r--  jstests/concurrency/fsm_workloads/collmod_separate_collections.js | 52
-rw-r--r--  jstests/concurrency/fsm_workloads/compact.js | 43
-rw-r--r--  jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js | 47
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection.js | 8
-rw-r--r--  jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js | 22
-rw-r--r--  jstests/concurrency/fsm_workloads/count.js | 10
-rw-r--r--  jstests/concurrency/fsm_workloads/count_indexed.js | 49
-rw-r--r--  jstests/concurrency/fsm_workloads/count_limit_skip.js | 83
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection.js | 25
-rw-r--r--  jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js | 83
-rw-r--r--  jstests/concurrency/fsm_workloads/create_collection.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/create_index_background.js | 98
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_noindex.js | 9
-rw-r--r--  jstests/concurrency/fsm_workloads/distinct_projection.js | 26
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_collection.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_database.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/explain.js | 28
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_aggregate.js | 76
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_count.js | 101
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_distinct.js | 48
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_find.js | 111
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_group.js | 37
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_remove.js | 68
-rw-r--r--  jstests/concurrency/fsm_workloads/explain_update.js | 114
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_inc.js | 19
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js | 43
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update.js | 32
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js | 22
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_grow.js | 43
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_update_queue.js | 104
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_upsert.js | 21
-rw-r--r--  jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js | 15
-rw-r--r--  jstests/concurrency/fsm_workloads/group.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/group_cond.js | 46
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_1char.js | 25
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_2d.js | 78
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js | 23
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base.js | 16
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_compound.js | 48
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_eval.js | 42
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js | 13
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js | 94
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_large.js | 49
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js | 24
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_multikey.js | 30
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js | 44
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js | 44
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_ttl.js | 20
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js | 44
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_upsert.js | 50
-rw-r--r--  jstests/concurrency/fsm_workloads/indexed_insert_where.js | 19
-rw-r--r--  jstests/concurrency/fsm_workloads/list_indexes.js | 9
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_drop.js | 11
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_inline.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge.js | 77
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js | 94
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce.js | 86
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js | 67
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace.js | 90
-rw-r--r--  jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js | 72
-rw-r--r--  jstests/concurrency/fsm_workloads/plan_cache_drop_database.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/reindex.js | 68
-rw-r--r--  jstests/concurrency/fsm_workloads/reindex_background.js | 36
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js | 4
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_multiple_documents.js | 18
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document.js | 14
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document_eval.js | 50
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js | 13
-rw-r--r--  jstests/concurrency/fsm_workloads/remove_where.js | 57
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 10
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_chain.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/rename_collection_droptarget.js | 10
-rw-r--r--  jstests/concurrency/fsm_workloads/server_status.js | 2
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_base.js | 93
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_data.js | 24
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_index.js | 24
-rw-r--r--  jstests/concurrency/fsm_workloads/touch_no_data_no_index.js | 34
-rw-r--r--  jstests/concurrency/fsm_workloads/update_and_bulk_insert.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_array.js | 41
-rw-r--r--  jstests/concurrency/fsm_workloads/update_array_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_check_index.js | 14
-rw-r--r--  jstests/concurrency/fsm_workloads/update_inc.js | 21
-rw-r--r--  jstests/concurrency/fsm_workloads/update_inc_capped.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield.js | 38
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js | 52
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js | 88
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_multifield_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js | 20
-rw-r--r--  jstests/concurrency/fsm_workloads/update_rename.js | 16
-rw-r--r--  jstests/concurrency/fsm_workloads/update_rename_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_replace.js | 24
-rw-r--r--  jstests/concurrency/fsm_workloads/update_replace_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple.js | 27
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple_eval.js | 38
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js | 13
-rw-r--r--  jstests/concurrency/fsm_workloads/update_simple_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_upsert_multi.js | 37
-rw-r--r--  jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js | 6
-rw-r--r--  jstests/concurrency/fsm_workloads/update_where.js | 82
-rw-r--r--  jstests/concurrency/fsm_workloads/upsert_where.js | 64
-rw-r--r--  jstests/concurrency/fsm_workloads/yield.js | 50
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_and_hashed.js | 109
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_and_sorted.js | 92
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_fetch.js | 35
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_geo_near.js | 127
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js | 148
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_id_hack.js | 43
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_rooted_or.js | 78
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_sort.js | 59
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_sort_merge.js | 82
-rw-r--r--  jstests/concurrency/fsm_workloads/yield_text.js | 77
138 files changed, 2700 insertions(+), 2470 deletions(-)
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index 3ce16aaea31..846e6900215 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -31,8 +31,8 @@ var $config = (function() {
// overhead
doc.padding = "";
var paddingLength = size - Object.bsonsize(doc);
- assertAlways.lte(0, paddingLength,
- 'document is already bigger than ' + size + ' bytes: ' + tojson(doc));
+ assertAlways.lte(
+ 0, paddingLength, 'document is already bigger than ' + size + ' bytes: ' + tojson(doc));
doc.padding = getStringOfLength(paddingLength);
assertAlways.eq(size, Object.bsonsize(doc));
return doc;
@@ -46,7 +46,7 @@ var $config = (function() {
};
var transitions = {
- query: { query: 1 }
+ query: {query: 1}
};
function setup(db, collName, cluster) {
@@ -55,18 +55,20 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
// note: padDoc caches the large string after allocating it once, so it's ok to call it
// in this loop
- bulk.insert(padDoc({
- flag: i % 2 ? true : false,
- rand: Random.rand(),
- randInt: Random.randInt(this.numDocs)
- }, this.docSize));
+ bulk.insert(padDoc(
+ {
+ flag: i % 2 ? true : false,
+ rand: Random.rand(),
+ randInt: Random.randInt(this.numDocs)
+ },
+ this.docSize));
}
var res = bulk.execute();
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(this.numDocs, res.nInserted);
assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
- assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: false }).itcount());
- assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: true }).itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({flag: false}).itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({flag: true}).itcount());
}
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index 3c3cf973434..38c47d79f13 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -8,47 +8,47 @@
* The data passed to the $group is greater than 100MB, which should force
* disk to be used.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
-
- // assume no other workload will manipulate collections with this prefix
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_group_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $group: { _id: '$randInt', count: { $sum: 1 } } },
- { $out: otherCollName }
- ], {
- allowDiskUse: true
- });
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl(function() {
- // sum the .count fields in the output coll
- var sum = db[otherCollName].aggregate([
- { $group: { _id: null, totalCount: { $sum: '$count' } } }
- ]).toArray()[0].totalCount;
- assertWhenOwnColl.eq(this.numDocs, sum);
- }.bind(this));
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
+
+ // assume no other workload will manipulate collections with this prefix
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_group_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$group: {_id: '$randInt', count: {$sum: 1}}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl(function() {
+ // sum the .count fields in the output coll
+ var sum = db[otherCollName]
+ .aggregate([{$group: {_id: null, totalCount: {$sum: '$count'}}}])
+ .toArray()[0]
+ .totalCount;
+ assertWhenOwnColl.eq(this.numDocs, sum);
+ }.bind(this));
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index 00e23a24c03..d93c4cdddd5 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -5,34 +5,34 @@
*
* Runs an aggregation with a $match that returns half the documents.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.getOutCollName = function getOutCollName(collName) {
- return collName + '_out_agg_match';
- };
+ $config.data.getOutCollName = function getOutCollName(collName) {
+ return collName + '_out_agg_match';
+ };
- $config.states.query = function query(db, collName) {
- // note that all threads output to the same collection
- var otherCollName = this.getOutCollName(collName);
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $out: otherCollName }
- ]);
- assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
- // NOTE: This relies on the fast-path for .count() with no query being isolated.
- // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so we
- // blacklisted this test for sharded clusters.
- assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
- };
+ $config.states.query = function query(db, collName) {
+ // note that all threads output to the same collection
+ var otherCollName = this.getOutCollName(collName);
+ var cursor = db[collName].aggregate([{$match: {flag: true}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
+ // NOTE: This relies on the fast-path for .count() with no query being isolated.
+ // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so
+ // we
+ // blacklisted this test for sharded clusters.
+ assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
- };
+ assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 2f312e0adda..03de9a1aeea 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -6,34 +6,34 @@
* Runs an aggregation with a $match that returns half the documents followed
* by a $sort on a field containing a random float.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_';
- };
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_';
+ };
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $sort: { rand: 1 } },
- { $out: otherCollName }
- ]);
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
- };
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
+ db[otherCollName].find().itcount());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index 161f7592d08..c2bda97e8cd 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -8,43 +8,44 @@
*
* The data returned by the $match is greater than 100MB, which should force an external sort.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- // assert that *half* the docs exceed the in-memory limit, because the $match stage will only
- // pass half the docs in the collection on to the $sort stage.
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
-
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $sort: { rand: 1 } },
- { $out: otherCollName }
- ], {
- allowDiskUse: true
- });
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ // assert that *half* the docs exceed the in-memory limit, because the $match stage will
+ // only
+ // pass half the docs in the collection on to the $sort stage.
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
+
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor =
+ db[collName]
+ .aggregate([{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
+ db[otherCollName].find().itcount());
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 6ad1573cb5a..8b8d3933c2d 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates new roles on a database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
@@ -29,15 +29,9 @@ var $config = (function() {
var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
db.createRole({
role: roleName,
- privileges: [
- {
- resource: { db: db.getName(), collection: collName },
- actions: ['update']
- }
- ],
- roles: [
- { role: 'read', db: db.getName() }
- ]
+ privileges:
+ [{resource: {db: db.getName(), collection: collName}, actions: ['update']}],
+ roles: [{role: 'read', db: db.getName()}]
});
// Verify the newly created role exists, as well as all previously created roles
@@ -58,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createRole: 1 },
- createRole: { createRole: 1 }
+ init: {createRole: 1},
+ createRole: {createRole: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index 7fe71f006fb..e49c63bc68e 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates new users on a database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
var $config = (function() {
@@ -27,11 +27,7 @@ var $config = (function() {
function createUser(db, collName) {
var username = uniqueUsername(this.prefix, this.tid, this.num++);
- db.createUser({
- user: username,
- pwd: 'password',
- roles: ['readWrite', 'dbAdmin']
- });
+ db.createUser({user: username, pwd: 'password', roles: ['readWrite', 'dbAdmin']});
// Verify the newly created user exists, as well as all previously created users
for (var i = 0; i < this.num; ++i) {
@@ -51,8 +47,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createUser: 1 },
- createUser: { createUser: 1 }
+ init: {createUser: 1},
+ createUser: {createUser: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
index 262de710fa2..d41066dbc63 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -6,7 +6,7 @@
* Repeatedly creates a new role on a database, and subsequently
* drops it from the database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
@@ -30,15 +30,9 @@ var $config = (function() {
var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
db.createRole({
role: roleName,
- privileges: [
- {
- resource: { db: db.getName(), collection: collName },
- actions: ['remove']
- }
- ],
- roles: [
- { role: 'read', db: db.getName() }
- ]
+ privileges:
+ [{resource: {db: db.getName(), collection: collName}, actions: ['remove']}],
+ roles: [{role: 'read', db: db.getName()}]
});
var res = db.getRole(roleName);
@@ -47,8 +41,7 @@ var $config = (function() {
assertAlways(!res.isBuiltin, 'role should be user-defined');
assertAlways(db.dropRole(roleName));
- assertAlways.isnull(db.getRole(roleName),
- "role '" + roleName + "' should not exist");
+ assertAlways.isnull(db.getRole(roleName), "role '" + roleName + "' should not exist");
}
return {
@@ -59,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDropRole: 1 },
- createAndDropRole: { createAndDropRole: 1 }
+ init: {createAndDropRole: 1},
+ createAndDropRole: {createAndDropRole: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
index 96f41eb4160..65cb8e41da2 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -26,11 +26,7 @@ var $config = (function() {
function createAndDropUser(db, collName) {
var username = uniqueUsername(this.prefix, this.tid, this.num++);
- db.createUser({
- user: username,
- pwd: 'password',
- roles: ['readWrite', 'dbAdmin']
- });
+ db.createUser({user: username, pwd: 'password', roles: ['readWrite', 'dbAdmin']});
var res = db.getUser(username);
assertAlways(res !== null, "user '" + username + "' should exist");
@@ -38,8 +34,7 @@ var $config = (function() {
assertAlways.eq(db.getName(), res.db);
assertAlways(db.dropUser(username));
- assertAlways.isnull(db.getUser(username),
- "user '" + username + "' should not exist");
+ assertAlways.isnull(db.getUser(username), "user '" + username + "' should not exist");
}
return {
@@ -50,8 +45,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDropUser: 1 },
- createAndDropUser: { createAndDropUser: 1 }
+ init: {createAndDropUser: 1},
+ createAndDropUser: {createAndDropUser: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index efed90ef9d1..7b803cd3284 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -14,7 +14,7 @@ var $config = (function() {
var data = {
numDocs: 1000,
- maxTTL: 5000, // max time to live
+ maxTTL: 5000, // max time to live
ttlIndexExists: true
};
@@ -22,12 +22,10 @@ var $config = (function() {
function collMod(db, collName) {
var newTTL = Random.randInt(this.maxTTL);
- var res = db.runCommand({ collMod: this.threadCollName,
- index: {
- keyPattern: { createdAt: 1 },
- expireAfterSeconds: newTTL
- }
- });
+ var res = db.runCommand({
+ collMod: this.threadCollName,
+ index: {keyPattern: {createdAt: 1}, expireAfterSeconds: newTTL}
+ });
assertAlways.commandWorked(res);
// only assert if new expireAfterSeconds differs from old one
if (res.hasOwnProperty('expireAfterSeconds_new')) {
@@ -42,7 +40,7 @@ var $config = (function() {
})();
var transitions = {
- collMod: { collMod: 1 }
+ collMod: {collMod: 1}
};
function setup(db, collName, cluster) {
@@ -50,7 +48,7 @@ var $config = (function() {
this.threadCollName = this.threadCollName || collName;
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ createdAt: new Date() });
+ bulk.insert({createdAt: new Date()});
}
var res = bulk.execute();
@@ -58,8 +56,7 @@ var $config = (function() {
assertAlways.eq(this.numDocs, res.nInserted);
// create TTL index
- res = db[this.threadCollName].ensureIndex({ createdAt: 1 },
- { expireAfterSeconds: 3600 });
+ res = db[this.threadCollName].ensureIndex({createdAt: 1}, {expireAfterSeconds: 3600});
assertAlways.commandWorked(res);
}
diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
index 05976a3ffce..5f9490dbaba 100644
--- a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
+++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -9,34 +9,36 @@
*
* Each thread updates a TTL index on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'collmod_separate_collections';
- $config.data.shardKey = { createdAt: 1 };
+var $config = extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'collmod_separate_collections';
+ $config.data.shardKey = {
+ createdAt: 1
+ };
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.setup.call(this, db, this.threadCollName);
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.setup.call(this, db, this.threadCollName);
+ };
- $config.transitions = Object.extend({
- init: { collMod: 1 }
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({init: {collMod: 1}}, $super.transitions);
- $config.setup = function setup(db, collName, cluster) {
- // no-op: since the init state is used to setup
- // the separate collections on a per-thread basis.
- };
+ $config.setup = function setup(db, collName, cluster) {
+ // no-op: since the init state is used to setup
+ // the separate collections on a per-thread basis.
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- $config.startState = 'init';
- return $config;
-});
+ $config.startState = 'init';
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
index 855cd6e73fa..b80e46c0d65 100644
--- a/jstests/concurrency/fsm_workloads/compact.js
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -8,25 +8,21 @@
* for each thread.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
var $config = (function() {
var data = {
nDocumentsToInsert: 1000,
- nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
- prefix: 'compact' // Use filename for prefix because filename is assumed unique
+ nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
+ prefix: 'compact' // Use filename for prefix because filename is assumed unique
};
var states = (function() {
function insertDocuments(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToInsert; ++i) {
- bulk.insert({
- a: Random.randInt(10),
- b: Random.randInt(10),
- c: Random.randInt(10)
- });
+ bulk.insert({a: Random.randInt(10), b: Random.randInt(10), c: Random.randInt(10)});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -35,20 +31,20 @@ var $config = (function() {
function createIndexes(db, collName) {
// The number of indexes created here is also stored in data.nIndexes
- var aResult = db[collName].ensureIndex({ a: 1 });
+ var aResult = db[collName].ensureIndex({a: 1});
assertAlways.commandWorked(aResult);
- var bResult = db[collName].ensureIndex({ b: 1 });
+ var bResult = db[collName].ensureIndex({b: 1});
assertAlways.commandWorked(bResult);
- var cResult = db[collName].ensureIndex({ c: 1 });
+ var cResult = db[collName].ensureIndex({c: 1});
assertAlways.commandWorked(cResult);
}
// This method is independent of collectionSetup to allow it to be overridden in
// workloads that extend this one
function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
+ this.threadCollName = this.prefix + '_' + this.tid;
}
function collectionSetup(db, collName) {
@@ -57,11 +53,8 @@ var $config = (function() {
}
function compact(db, collName) {
- var res = db.runCommand({
- compact: this.threadCollName,
- paddingFactor: 1.0,
- force: true
- });
+ var res =
+ db.runCommand({compact: this.threadCollName, paddingFactor: 1.0, force: true});
if (!isEphemeral(db)) {
assertAlways.commandWorked(res);
} else {
@@ -71,8 +64,10 @@ var $config = (function() {
function query(db, collName) {
var count = db[this.threadCollName].find().itcount();
- assertWhenOwnColl.eq(count, this.nDocumentsToInsert, 'number of documents in ' +
- 'collection should not change following a compact');
+ assertWhenOwnColl.eq(count,
+ this.nDocumentsToInsert,
+ 'number of documents in ' +
+ 'collection should not change following a compact');
var indexesCount = db[this.threadCollName].getIndexes().length;
assertWhenOwnColl.eq(indexesCount, this.nIndexes);
}
@@ -86,10 +81,10 @@ var $config = (function() {
})();
var transitions = {
- init: { collectionSetup: 1 },
- collectionSetup: { compact: 0.5, query: 0.5 },
- compact: { compact: 0.5, query: 0.5 },
- query: { compact: 0.5, query: 0.5 }
+ init: {collectionSetup: 1},
+ collectionSetup: {compact: 0.5, query: 0.5},
+ compact: {compact: 0.5, query: 0.5},
+ query: {compact: 0.5, query: 0.5}
};
var teardown = function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
index dc8d9881f69..22eef359b87 100644
--- a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+++ b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
@@ -8,31 +8,30 @@
* for all threads. Uses paddingBytes as a parameter for compact.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
-var $config = extendWorkload($config, function($config, $super) {
- $config.states.init = function init(db, collName) {
- this.threadCollName = collName;
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = collName;
+ };
- $config.states.compact = function compact(db, collName) {
- var res = db.runCommand({
- compact: this.threadCollName,
- paddingBytes: 1024 * 5,
- force: true
- });
- if (!isEphemeral(db)) {
- assertAlways.commandWorked(res);
- } else {
- assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
- }
- };
+ $config.states.compact = function compact(db, collName) {
+ var res =
+ db.runCommand({compact: this.threadCollName, paddingBytes: 1024 * 5, force: true});
+ if (!isEphemeral(db)) {
+ assertAlways.commandWorked(res);
+ } else {
+ assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
+ }
+ };
- // no-op the query state because querying while compacting can result in closed cursors
- // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
- $config.states.query = function query(db, collName) { };
+ // no-op the query state because querying while compacting can result in closed cursors
+ // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
+ $config.states.query = function query(db, collName) {};
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index 92000e0e164..79b9934077b 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -32,7 +32,7 @@ var $config = (function() {
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < (this.tid + 1) * 200; i++) {
- bulk.insert({ i: i, rand: Random.rand() });
+ bulk.insert({i: i, rand: Random.rand()});
}
var res = bulk.execute();
@@ -58,7 +58,7 @@ var $config = (function() {
var indexKeys = db[this.threadCollName].getIndexKeys();
assertWhenOwnDB.eq(1, indexKeys.length);
assertWhenOwnDB(function() {
- assertWhenOwnDB.docEq({ _id: 1 }, indexKeys[0]);
+ assertWhenOwnDB.docEq({_id: 1}, indexKeys[0]);
});
}
@@ -69,8 +69,8 @@ var $config = (function() {
})();
var transitions = {
- init: { convertToCapped: 1 },
- convertToCapped: { convertToCapped: 1 }
+ init: {convertToCapped: 1},
+ convertToCapped: {convertToCapped: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
index dd6716c750d..2eaa8e261b2 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
@@ -13,15 +13,17 @@
* but that only the _id index remains after (re-)converting
* to a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.states.convertToCapped = function convertToCapped(db, collName) {
- assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({ i: 1, rand: 1 }));
- assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
- $super.states.convertToCapped.apply(this, arguments);
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states.convertToCapped = function convertToCapped(db, collName) {
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({i: 1, rand: 1}));
+ assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
+ $super.states.convertToCapped.apply(this, arguments);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 626ac49a4c8..61a4c93d3ab 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -23,7 +23,7 @@ var $config = (function() {
return this.modulus * this.countPerNum;
},
getCount: function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
+ var query = Object.extend({tid: this.tid}, predicate);
return db[this.threadCollName].count(query);
}
};
@@ -39,7 +39,7 @@ var $config = (function() {
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.getNumDocs(); ++i) {
- bulk.insert({ i: i % this.modulus, tid: this.tid });
+ bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -50,7 +50,7 @@ var $config = (function() {
assertWhenOwnColl.eq(this.getCount(db), this.getNumDocs());
var num = Random.randInt(this.modulus);
- assertWhenOwnColl.eq(this.getCount(db, { i: num }), this.countPerNum);
+ assertWhenOwnColl.eq(this.getCount(db, {i: num}), this.countPerNum);
}
return {
@@ -61,8 +61,8 @@ var $config = (function() {
})();
var transitions = {
- init: { count: 1 },
- count: { count: 1 }
+ init: {count: 1},
+ count: {count: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js
index dc49593f46f..d7a49c6fb40 100644
--- a/jstests/concurrency/fsm_workloads/count_indexed.js
+++ b/jstests/concurrency/fsm_workloads/count_indexed.js
@@ -10,30 +10,35 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'count_fsm';
- $config.data.shardKey = { tid: 1, i: 1 };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.prefix = 'count_fsm';
+ $config.data.shardKey = {
+ tid: 1,
+ i: 1
+ };
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
- return db[this.threadCollName].find(query).hint({ tid: 1, i: 1 }).count();
- };
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName].find(query).hint({tid: 1, i: 1}).count();
+ };
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.states.init.apply(this, arguments);
- assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ tid: 1, i: 1 }));
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.states.init.apply(this, arguments);
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({tid: 1, i: 1}));
+ };
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js
index b770e542382..999fc941f8b 100644
--- a/jstests/concurrency/fsm_workloads/count_limit_skip.js
+++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js
@@ -10,43 +10,46 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'count_fsm_q_l_s';
-
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
- return db[this.threadCollName].find(query)
- .skip(this.countPerNum - 1)
- .limit(10).count(true);
- };
-
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
-
- $super.states.init.apply(this, arguments);
- };
-
- $config.states.count = function count(db, collName) {
- assertWhenOwnColl.eq(this.getCount(db),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 10);
-
- var num = Random.randInt(this.modulus);
- assertWhenOwnColl.eq(this.getCount(db, { i: num }),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 1);
- };
-
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
-
- return $config;
-});
-
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.prefix = 'count_fsm_q_l_s';
+
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName]
+ .find(query)
+ .skip(this.countPerNum - 1)
+ .limit(10)
+ .count(true);
+ };
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+
+ $super.states.init.apply(this, arguments);
+ };
+
+ $config.states.count = function count(db, collName) {
+ assertWhenOwnColl.eq(this.getCount(db),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 10);
+
+ var num = Random.randInt(this.modulus);
+ assertWhenOwnColl.eq(this.getCount(db, {i: num}),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 1);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 90f2426b221..43cf7fe2b54 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -6,15 +6,18 @@
* Repeatedly creates a capped collection. Also verifies that truncation
* occurs once the collection reaches a certain size.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
// Returns a document of the form { _id: ObjectId(...), field: '...' }
// with specified BSON size.
function makeDocWithSize(targetSize) {
- var doc = { _id: new ObjectId(), field: '' };
+ var doc = {
+ _id: new ObjectId(),
+ field: ''
+ };
var size = Object.bsonsize(doc);
assertAlways.gte(targetSize, size);
@@ -42,9 +45,11 @@ var $config = (function() {
// Returns an array containing the _id fields of all the documents
// in the collection, sorted according to their insertion order.
function getObjectIds(db, collName) {
- return db[collName].find({}, { _id: 1 }).map(function(doc) {
- return doc._id;
- });
+ return db[collName]
+ .find({}, {_id: 1})
+ .map(function(doc) {
+ return doc._id;
+ });
}
var data = {
@@ -67,7 +72,7 @@ var $config = (function() {
// Truncation in MMAPv1 has well defined behavior.
if (isMongod(db) && isMMAPv1(db)) {
ids.push(this.insert(db, myCollName, largeDocSize));
-
+
// Insert a large document and verify that a truncation has occurred.
// There should be 1 document in the collection and it should always be
// the most recently inserted document.
@@ -124,7 +129,7 @@ var $config = (function() {
var options = {
capped: true,
- size: 8192 // multiple of 256; larger than 4096 default
+ size: 8192 // multiple of 256; larger than 4096 default
};
function uniqueCollectionName(prefix, tid, num) {
@@ -151,8 +156,8 @@ var $config = (function() {
})();
var transitions = {
- init: { create: 1 },
- create: { create: 1 }
+ init: {create: 1},
+ create: {create: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index ebdc1d55723..53bc9554904 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -7,57 +7,60 @@
* occurs once the collection reaches a certain size or contains a
* certain number of documents.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- $config.data.prefix = 'create_capped_collection_maxdocs';
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ $config.data.prefix = 'create_capped_collection_maxdocs';
- var options = {
- capped: true,
- size: 8192, // multiple of 256; larger than 4096 default
- max: 3
- };
+ var options = {
+ capped: true,
+ size: 8192, // multiple of 256; larger than 4096 default
+ max: 3
+ };
- function uniqueCollectionName(prefix, tid, num) {
- return prefix + tid + '_' + num;
- }
-
- // TODO: how to avoid having too many files open?
- function create(db, collName) {
- var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
- assertAlways.commandWorked(db.createCollection(myCollName, options));
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
- // Define a small document to be an eighth the size of the capped collection.
- var smallDocSize = Math.floor(options.size / 8) - 1;
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName, options));
- // Verify size functionality still works as we expect
- this.verifySizeTruncation(db, myCollName, options);
+ // Define a small document to be an eighth the size of the capped collection.
+ var smallDocSize = Math.floor(options.size / 8) - 1;
- // Insert multiple small documents and verify that at least one truncation has occurred.
- // There should never be more than 3 documents in the collection, regardless of the storage
- // engine. They should always be the most recently inserted documents.
+ // Verify size functionality still works as we expect
+ this.verifySizeTruncation(db, myCollName, options);
- var ids = [];
- var count;
+ // Insert multiple small documents and verify that at least one truncation has occurred.
+ // There should never be more than 3 documents in the collection, regardless of the
+ // storage
+ // engine. They should always be the most recently inserted documents.
- ids.push(this.insert(db, myCollName, smallDocSize));
- ids.push(this.insert(db, myCollName, smallDocSize));
+ var ids = [];
+ var count;
- for (var i = 0; i < 50; i++) {
ids.push(this.insert(db, myCollName, smallDocSize));
- count = db[myCollName].find().itcount();
- assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
- assertWhenOwnDB.eq(ids.slice(ids.length - count),
- this.getObjectIds(db, myCollName),
- 'expected truncation to remove the oldest documents');
+ ids.push(this.insert(db, myCollName, smallDocSize));
+
+ for (var i = 0; i < 50; i++) {
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count),
+ this.getObjectIds(db, myCollName),
+ 'expected truncation to remove the oldest documents');
+ }
}
- }
- $config.states.create = create;
+ $config.states.create = create;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index fa2a13fb45d..fdc6d8af9fd 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates a collection.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -40,8 +40,8 @@ var $config = (function() {
})();
var transitions = {
- init: { create: 1 },
- create: { create: 1 }
+ init: {create: 1},
+ create: {create: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index 4dcb1e9ec7b..046709ebdd3 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -8,7 +8,7 @@
* index has completed and the test no longer needs to execute more transitions.
* The first thread (tid = 0) will be the one that creates the background index.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = (function() {
@@ -21,7 +21,7 @@ var $config = (function() {
getHighestX: function getHighestX(coll, tid) {
// Find highest value of x.
var highest = 0;
- var cursor = coll.find({ tid: tid }).sort({ x: -1 }).limit(-1);
+ var cursor = coll.find({tid: tid}).sort({x: -1}).limit(-1);
assertWhenOwnColl(function() {
highest = cursor.next().x;
});
@@ -35,7 +35,7 @@ var $config = (function() {
// Add thread-specific documents
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToSeed; ++i) {
- bulk.insert({ x: i, tid: this.tid });
+ bulk.insert({x: i, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -47,9 +47,9 @@ var $config = (function() {
// Before creating the background index make sure insert or update
// CRUD operations are active.
assertWhenOwnColl.soon(function() {
- return coll.find({ crud: { $exists: true } }).itcount() > 0;
+ return coll.find({crud: {$exists: true}}).itcount() > 0;
}, 'No documents with "crud" field have been inserted or updated', 60 * 1000);
- res = coll.ensureIndex({ x: 1 }, { background: true });
+ res = coll.ensureIndex({x: 1}, {background: true});
assertAlways.commandWorked(res, tojson(res));
}
}
@@ -58,15 +58,15 @@ var $config = (function() {
// Insert documents with an increasing value of index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
var highest = this.getHighestX(coll, this.tid);
for (var i = 0; i < this.nDocumentsToCreate; ++i) {
- res = coll.insert({ x: i + highest + 1, tid: this.tid, crud: 1 });
+ res = coll.insert({x: i + highest + 1, tid: this.tid, crud: 1});
assertAlways.writeOK(res);
assertAlways.eq(res.nInserted, 1, tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
+ assertWhenOwnColl.eq(coll.find({tid: this.tid}).itcount(),
this.nDocumentsToCreate + count,
'createDocs itcount mismatch');
}
@@ -75,21 +75,19 @@ var $config = (function() {
// Read random documents from the collection on index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
- assertWhenOwnColl.gte(count,
- this.nDocumentsToRead,
- 'readDocs not enough documents for tid ' + this.tid);
+ var count = coll.find({tid: this.tid}).itcount();
+ assertWhenOwnColl.gte(
+ count, this.nDocumentsToRead, 'readDocs not enough documents for tid ' + this.tid);
var highest = this.getHighestX(coll, this.tid);
for (var i = 0; i < this.nDocumentsToRead; ++i) {
// Do randomized reads on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.find({ x: Random.randInt(highest), tid: this.tid }).itcount();
- assertWhenOwnColl.contains(res, [ 0, 1 ], tojson(res));
+ res = coll.find({x: Random.randInt(highest), tid: this.tid}).itcount();
+ assertWhenOwnColl.contains(res, [0, 1], tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
- count,
- 'readDocs itcount mismatch');
+ assertWhenOwnColl.eq(
+ coll.find({tid: this.tid}).itcount(), count, 'readDocs itcount mismatch');
}
function updateDocs(db, collName) {
@@ -98,7 +96,7 @@ var $config = (function() {
if (!isMongos(db)) {
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
assertWhenOwnColl.gte(count,
this.nDocumentsToUpdate,
'updateDocs not enough documents for tid ' + this.tid);
@@ -107,18 +105,17 @@ var $config = (function() {
for (var i = 0; i < this.nDocumentsToUpdate; ++i) {
// Do randomized updates on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.update({ x: Random.randInt(highest), tid: this.tid },
- { $inc: { crud: 1 } });
+ res = coll.update({x: Random.randInt(highest), tid: this.tid},
+ {$inc: {crud: 1}});
assertAlways.writeOK(res);
if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.contains(res.nModified, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
}
- assertWhenOwnColl.contains(res.nMatched, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
assertWhenOwnColl.eq(res.nUpserted, 0, tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
- count,
- 'updateDocs itcount mismatch');
+ assertWhenOwnColl.eq(
+ coll.find({tid: this.tid}).itcount(), count, 'updateDocs itcount mismatch');
}
}
@@ -126,7 +123,7 @@ var $config = (function() {
// Remove random documents from the collection on index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
// Set the maximum number of documents we can delete to ensure that there
// are documents to read or update after deleteDocs completes.
@@ -145,12 +142,12 @@ var $config = (function() {
for (var i = 0; i < nDeleteDocs; ++i) {
// Do randomized deletes on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.remove({ x: Random.randInt(highest), tid: this.tid });
+ res = coll.remove({x: Random.randInt(highest), tid: this.tid});
assertAlways.writeOK(res);
- assertWhenOwnColl.contains(res.nRemoved, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nRemoved, [0, 1], tojson(res));
nActualDeletes += res.nRemoved;
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
+ assertWhenOwnColl.eq(coll.find({tid: this.tid}).itcount(),
count - nActualDeletes,
'deleteDocs itcount mismatch');
}
@@ -166,16 +163,11 @@ var $config = (function() {
})();
var transitions = {
- init:
- { createDocs: 1 },
- createDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- readDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- updateDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- deleteDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
+ init: {createDocs: 1},
+ createDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ readDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ updateDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ deleteDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
};
var internalQueryExecYieldIterations;
@@ -185,12 +177,12 @@ var $config = (function() {
var nSetupDocs = this.nDocumentsToSeed * 200;
var coll = db[collName];
- var res = coll.ensureIndex({ tid: 1 });
+ var res = coll.ensureIndex({tid: 1});
assertAlways.commandWorked(res, tojson(res));
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < nSetupDocs; ++i) {
- bulk.insert({ x: i });
+ bulk.insert({x: i});
}
res = bulk.execute();
assertAlways.writeOK(res);
@@ -199,11 +191,11 @@ var $config = (function() {
// Increase the following parameters to reduce the number of yields.
cluster.executeOnMongodNodes(function(db) {
var res;
- res = db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 100000 });
+ res = db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 100000});
assertAlways.commandWorked(res);
internalQueryExecYieldIterations = res.was;
- res = db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 10000 });
+ res = db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10000});
assertAlways.commandWorked(res);
internalQueryExecYieldPeriodMS = res.was;
});
@@ -211,18 +203,14 @@ var $config = (function() {
function teardown(db, collName, cluster) {
cluster.executeOnMongodNodes(function(db) {
- assertAlways.commandWorked(
- db.adminCommand({
- setParameter: 1,
- internalQueryExecYieldIterations: internalQueryExecYieldIterations
- })
- );
- assertAlways.commandWorked(
- db.adminCommand({
- setParameter: 1,
- internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS
- })
- );
+ assertAlways.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryExecYieldIterations: internalQueryExecYieldIterations
+ }));
+ assertAlways.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS
+ }));
});
}
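The setup/teardown hunks above adjust a query-yield server parameter around a background index build; a minimal standalone version of that round-trip, with an arbitrary seed count and a hypothetical collection name, might look like the sketch below.

    // Sketch: seed documents, raise the yield threshold, build the index in the
    // background, then restore the previous parameter value (as teardown does).
    var coll = db.create_index_background_demo;  // hypothetical collection
    coll.drop();
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 1000; i++) {
        bulk.insert({x: i, tid: 0});
    }
    assert.writeOK(bulk.execute());

    var res = db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 100000});
    assert.commandWorked(res);
    var previousYieldIterations = res.was;  // setParameter reports the old value in 'was'

    assert.commandWorked(coll.ensureIndex({x: 1}, {background: true}));

    // Put the parameter back, mirroring what teardown() does on each mongod.
    assert.commandWorked(db.adminCommand(
        {setParameter: 1, internalQueryExecYieldIterations: previousYieldIterations}));
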
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index b8598984f80..c76b5e972f5 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -7,14 +7,14 @@
* The indexed field contains unique values.
* Each thread operates on a separate collection.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
var data = {
numDocs: 1000,
prefix: 'distinct_fsm',
- shardKey: { i: 1 }
+ shardKey: {i: 1}
};
var states = (function() {
@@ -23,12 +23,12 @@ var $config = (function() {
this.threadCollName = this.prefix + '_' + this.tid;
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ i: i });
+ bulk.insert({i: i});
}
var res = bulk.execute();
assertAlways.writeOK(res);
assertAlways.eq(this.numDocs, res.nInserted);
- assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ i: 1 }));
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({i: 1}));
}
function distinct(db, collName) {
@@ -43,8 +43,8 @@ var $config = (function() {
})();
var transitions = {
- init: { distinct: 1 },
- distinct: { distinct: 1 }
+ init: {distinct: 1},
+ distinct: {distinct: 1}
};
function teardown(db, collName, cluster) {
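For context, the init/distinct states of this workload reduce to the pattern sketched here; the collection name and document count are illustrative only.

    // Sketch: seed unique values, index them, and check that distinct()
    // returns one entry per document.
    var coll = db.distinct_demo;  // hypothetical collection
    coll.drop();
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 1000; i++) {
        bulk.insert({i: i});
    }
    assert.writeOK(bulk.execute());
    assert.commandWorked(coll.ensureIndex({i: 1}));

    assert.eq(1000, coll.distinct('i').length);
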
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index 6a38830f9d6..b55d1e58d3b 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -24,7 +24,7 @@ var $config = (function() {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ i: i % this.modulus, tid: this.tid });
+ bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -32,8 +32,7 @@ var $config = (function() {
}
function distinct(db, collName) {
- assertWhenOwnColl.eq(this.modulus,
- db[collName].distinct('i', { tid: this.tid }).length);
+ assertWhenOwnColl.eq(this.modulus, db[collName].distinct('i', {tid: this.tid}).length);
}
return {
@@ -44,8 +43,8 @@ var $config = (function() {
})();
var transitions = {
- init: { distinct: 1 },
- distinct: { distinct: 1 }
+ init: {distinct: 1},
+ distinct: {distinct: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js
index d934c329f20..cf8d5ab9501 100644
--- a/jstests/concurrency/fsm_workloads/distinct_projection.js
+++ b/jstests/concurrency/fsm_workloads/distinct_projection.js
@@ -7,17 +7,21 @@
* The indexed field contains unique values.
* Each thread operates on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'distinct_projection_fsm';
+var $config = extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'distinct_projection_fsm';
- $config.states.distinct = function distinct(db, collName) {
- var query = { i: { $lt: this.numDocs / 2 } };
- assertWhenOwnColl.eq(this.numDocs / 2,
- db[this.threadCollName].distinct('i', query).length);
- };
+ $config.states.distinct = function distinct(db, collName) {
+ var query = {
+ i: {$lt: this.numDocs / 2}
+ };
+ assertWhenOwnColl.eq(
+ this.numDocs / 2,
+ db[this.threadCollName].distinct('i', query).length);
+ };
- return $config;
-});
+ return $config;
+ });
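distinct_projection.js overrides the parent's distinct state with a query-filtered distinct; a standalone approximation of what that overridden state asserts, run outside the FSM harness with hypothetical names, is:

    // Sketch: distinct() with a query predicate only considers matching documents.
    var coll = db.distinct_projection_demo;  // hypothetical collection
    coll.drop();
    for (var i = 0; i < 100; i++) {
        assert.writeOK(coll.insert({i: i}));
    }
    assert.eq(50, coll.distinct('i', {i: {$lt: 50}}).length);
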
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
index 1f92541e9fe..64a60ef8e79 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -38,8 +38,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDrop: 1 },
- createAndDrop: { createAndDrop: 1 }
+ init: {createAndDrop: 1},
+ createAndDrop: {createAndDrop: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
index fee8ab3d1a3..9a6b9e0fb80 100644
--- a/jstests/concurrency/fsm_workloads/drop_database.js
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -25,8 +25,8 @@ var $config = (function() {
};
var transitions = {
- init: { createAndDrop: 1 },
- createAndDrop: { createAndDrop: 1 }
+ init: {createAndDrop: 1},
+ createAndDrop: {createAndDrop: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
index e5199bcc377..983218a7fbf 100644
--- a/jstests/concurrency/fsm_workloads/explain.js
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -6,14 +6,14 @@
* Runs explain() on a collection.
*
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
var $config = (function() {
var data = {
collNotExist: 'donotexist__',
nInserted: 0,
- shardKey: { j: 1 },
+ shardKey: {j: 1},
assignEqualProbsToTransitions: function assignEqualProbsToTransitions(statesMap) {
var states = Object.keys(statesMap);
assertAlways.gt(states.length, 0);
@@ -27,28 +27,22 @@ var $config = (function() {
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ j: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({j: 1}));
}
var states = (function() {
function insert(db, collName) {
- db[collName].insert({
- i: this.nInserted,
- j: 2 * this.nInserted
- });
+ db[collName].insert({i: this.nInserted, j: 2 * this.nInserted});
this.nInserted++;
}
function explain(db, collName) {
// test the three verbosity levels:
// 'queryPlanner', 'executionStats', and 'allPlansExecution'
- ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(
- function(verbosity) {
- assertAlways.commandWorked(db[collName]
- .find({ j: this.nInserted / 2 })
- .explain(verbosity));
- }.bind(this)
- );
+ ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(function(verbosity) {
+ assertAlways.commandWorked(
+ db[collName].find({j: this.nInserted / 2}).explain(verbosity));
+ }.bind(this));
}
function explainNonExistentNS(db, collName) {
@@ -76,9 +70,9 @@ var $config = (function() {
})();
var transitions = {
- insert: { insert: 0.1, explain: 0.8, explainNonExistentNS: 0.1 },
- explain: { insert: 0.7, explain: 0.2, explainNonExistentNS: 0.1 },
- explainNonExistentNS: { insert: 0.4, explain: 0.5, explainNonExistentNS: 0.1 }
+ insert: {insert: 0.1, explain: 0.8, explainNonExistentNS: 0.1},
+ explain: {insert: 0.7, explain: 0.2, explainNonExistentNS: 0.1},
+ explainNonExistentNS: {insert: 0.4, explain: 0.5, explainNonExistentNS: 0.1}
};
return {
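The explain state above cycles through the three explain verbosity modes; run outside the workload it comes down to roughly the following (collection name hypothetical).

    // Sketch: the same query explained at each verbosity level.
    var coll = db.explain_demo;  // hypothetical collection
    coll.drop();
    assert.commandWorked(coll.ensureIndex({j: 1}));
    assert.writeOK(coll.insert({i: 1, j: 2}));

    ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(function(verbosity) {
        var res = coll.find({j: 2}).explain(verbosity);
        assert.commandWorked(res);
        printjson(res.queryPlanner.winningPlan);  // an index scan on {j: 1} is expected here
    });
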
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
index 02a00923c0d..82542be4cc4 100644
--- a/jstests/concurrency/fsm_workloads/explain_aggregate.js
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -6,40 +6,46 @@
* Runs explain() and aggregate() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- function assertCursorStages(num, obj) {
- assertAlways(obj.stages, tojson(obj));
- assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
- assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
- assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
- tojson(obj.stages[0].$cursor));
- }
-
- $config.states = Object.extend({
- explainMatch: function explainMatch(db, collName) {
- var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 2 } }]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor
- assertCursorStages(1, res);
- },
- explainMatchProject: function explainMatchProject(db, collName) {
- var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 3 } },
- { $project: { i: 1 } }]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor, $project
- assertCursorStages(2, res);
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ function assertCursorStages(num, obj) {
+ assertAlways(obj.stages, tojson(obj));
+ assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
+ assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
+ assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
+ tojson(obj.stages[0].$cursor));
}
- }, $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
-
- return $config;
-});
+ $config.states = Object.extend(
+ {
+ explainMatch: function explainMatch(db, collName) {
+ var res = db[collName].explain().aggregate([{$match: {i: this.nInserted / 2}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor
+ assertCursorStages(1, res);
+ },
+ explainMatchProject: function explainMatchProject(db, collName) {
+ var res =
+ db[collName]
+ .explain()
+ .aggregate([{$match: {i: this.nInserted / 3}}, {$project: {i: 1}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor, $project
+ assertCursorStages(2, res);
+ }
+ },
+ $super.states);
+
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
+
+ return $config;
+ });
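assertCursorStages() checks the stage list of an explained aggregation; the sketch below reproduces that check for the $match plus $project case on an unsharded collection, assuming the output shape this workload asserts for servers of this era (names hypothetical).

    // Sketch: an explained $match + $project pipeline reports a $cursor stage
    // (wrapping the query plan) followed by a $project stage.
    var coll = db.explain_aggregate_demo;  // hypothetical collection
    coll.drop();
    assert.writeOK(coll.insert({i: 1}));

    var res = coll.explain().aggregate([{$match: {i: 1}}, {$project: {i: 1}}]);
    assert.commandWorked(res);
    assert(res.stages, tojson(res));
    assert.eq(2, res.stages.length, tojson(res.stages));
    assert(res.stages[0].$cursor.hasOwnProperty('queryPlanner'), tojson(res.stages[0]));
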
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
index 0b44073195d..05cfcc5ba87 100644
--- a/jstests/concurrency/fsm_workloads/explain_count.js
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -5,55 +5,64 @@
*
* Runs explain() and count() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- function assertNCounted(num, obj, db) {
- var stage = obj.executionStats.executionStages;
- // get sharded stage(s) if counting on mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
+ function assertNCounted(num, obj, db) {
+ var stage = obj.executionStats.executionStages;
+ // get sharded stage(s) if counting on mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertWhenOwnColl.eq(num, stage.nCounted);
}
- assertWhenOwnColl.eq(num, stage.nCounted);
- }
- $config.states = Object.extend({
- explainBasicCount: function explainBasicCount(db, collName) {
- var res = db[collName].explain().count();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- },
- explainCountHint: function explainCountHint(db, collName) {
- assertWhenOwnColl(function() {
- var res = db[collName].explain()
- .find({ i: this.nInserted / 2 })
- .hint({ i: 1 }).count();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
- });
- },
- explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
- var res = db[collName].explain('executionStats')
- .find({ i: this.nInserted }).skip(1).count(false);
- assertAlways.commandWorked(res);
- assertNCounted(1, res, db);
- },
- explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
- var res = db[collName].explain('executionStats')
- .find({ i: this.nInserted }).skip(1).count(true);
- assertAlways.commandWorked(res);
- assertNCounted(0, res, db);
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainBasicCount: function explainBasicCount(db, collName) {
+ var res = db[collName].explain().count();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ },
+ explainCountHint: function explainCountHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res =
+ db[collName].explain().find({i: this.nInserted / 2}).hint({i: 1}).count();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
+ });
+ },
+ explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(false);
+ assertAlways.commandWorked(res);
+ assertNCounted(1, res, db);
+ },
+ explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(true);
+ assertAlways.commandWorked(res);
+ assertNCounted(0, res, db);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
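The two skip/limit states hinge on count(applySkipLimit); a standalone illustration against a single mongod (so executionStages is not nested under shards; names and values hypothetical) could be:

    // Sketch: count(false) ignores skip when counting, count(true) applies it.
    var coll = db.explain_count_demo;  // hypothetical collection
    coll.drop();
    assert.writeOK(coll.insert({i: 7}));

    var res = coll.explain('executionStats').find({i: 7}).skip(1).count(false);
    assert.commandWorked(res);
    print('nCounted ignoring skip: ' + res.executionStats.executionStages.nCounted);  // 1

    res = coll.explain('executionStats').find({i: 7}).skip(1).count(true);
    assert.commandWorked(res);
    print('nCounted applying skip: ' + res.executionStats.executionStages.nCounted);  // 0
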
diff --git a/jstests/concurrency/fsm_workloads/explain_distinct.js b/jstests/concurrency/fsm_workloads/explain_distinct.js
index 65588909917..b772ac3ac25 100644
--- a/jstests/concurrency/fsm_workloads/explain_distinct.js
+++ b/jstests/concurrency/fsm_workloads/explain_distinct.js
@@ -5,28 +5,32 @@
*
* Runs explain() and distinct() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend({
- explainBasicDistinct: function (db, collName) {
- var res = db[collName].explain().distinct('i');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
- },
- explainDistinctIndex: function (db, collName) {
- var res = db[collName].explain().distinct('_id');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
- }
- }, $super.states);
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states = Object.extend(
+ {
+ explainBasicDistinct: function(db, collName) {
+ var res = db[collName].explain().distinct('i');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
+ },
+ explainDistinctIndex: function(db, collName) {
+ var res = db[collName].explain().distinct('_id');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
index acb189d24be..0712c94f483 100644
--- a/jstests/concurrency/fsm_workloads/explain_find.js
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -6,61 +6,66 @@
* Runs explain() and find() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainLimit: function explainLimit(db, collName) {
- var res = db[collName].find().limit(3).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
- },
- explainBatchSize: function explainBatchSize(db, collName) {
- var res = db[collName].find().batchSize(3).explain();
- assertAlways.commandWorked(res);
- },
- explainAddOption: function explainAddOption(db, collName) {
- var res = db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
- assertAlways.commandWorked(res);
- },
- explainSkip: function explainSkip(db, collName) {
- var res = db[collName].explain().find().skip(3).finish();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
- },
- explainSort: function explainSort(db, collName) {
- var res = db[collName].find().sort({ i: -1 }).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
- },
- explainHint: function explainHint(db, collName) {
- assertWhenOwnColl(function() {
- var res = db[collName].find().hint({ j: 1 }).explain();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- });
- },
- explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
- var res = db[collName].find().maxTimeMS(2000).explain();
- assertAlways.commandWorked(res);
- },
- explainSnapshot: function explainSnapshot(db, collName) {
- var res = db[collName].find().snapshot().explain();
- assertAlways.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainLimit: function explainLimit(db, collName) {
+ var res = db[collName].find().limit(3).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
+ },
+ explainBatchSize: function explainBatchSize(db, collName) {
+ var res = db[collName].find().batchSize(3).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainAddOption: function explainAddOption(db, collName) {
+ var res =
+ db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
+ assertAlways.commandWorked(res);
+ },
+ explainSkip: function explainSkip(db, collName) {
+ var res = db[collName].explain().find().skip(3).finish();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
+ },
+ explainSort: function explainSort(db, collName) {
+ var res = db[collName].find().sort({i: -1}).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
+ },
+ explainHint: function explainHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].find().hint({j: 1}).explain();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ });
+ },
+ explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
+ var res = db[collName].find().maxTimeMS(2000).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainSnapshot: function explainSnapshot(db, collName) {
+ var res = db[collName].find().snapshot().explain();
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- // doubling number of iterations so there is a higher chance we will
- // transition to each of the 8 new states at least once
- $config.iterations = $super.iterations * 2;
+ // doubling number of iterations so there is a higher chance we will
+ // transition to each of the 8 new states at least once
+ $config.iterations = $super.iterations * 2;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_group.js b/jstests/concurrency/fsm_workloads/explain_group.js
index d99a60d7c42..007c703c648 100644
--- a/jstests/concurrency/fsm_workloads/explain_group.js
+++ b/jstests/concurrency/fsm_workloads/explain_group.js
@@ -6,24 +6,27 @@
* Runs explain() and group() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainBasicGroup: function explainBasicGroup(db, collName) {
- var res = db[collName].explain().group(
- { key: { i: 1 }, initial: {}, reduce: function() {} }
- );
- assertAlways.commandWorked(res);
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainBasicGroup: function explainBasicGroup(db, collName) {
+ var res = db[collName].explain().group(
+ {key: {i: 1}, initial: {}, reduce: function() {}});
+ assertAlways.commandWorked(res);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
index 02620a92bea..37b451994d9 100644
--- a/jstests/concurrency/fsm_workloads/explain_remove.js
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -5,39 +5,45 @@
*
* Runs explain() and remove() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainSingleRemove: function explainSingleRemove(db, collName) {
- var res = db[collName].explain('executionStats')
- .remove({ i: this.nInserted }, /* justOne */ true);
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
+ $config.states = Object.extend(
+ {
+ explainSingleRemove: function explainSingleRemove(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .remove({i: this.nInserted}, /* justOne */ true);
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
- // the document should not have been deleted.
- assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
- }.bind(this));
- },
- explainMultiRemove: function explainMultiRemove(db, collName) {
- var res = db[collName].explain('executionStats')
- .remove({i: {$lte: this.nInserted / 2}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(this.nInserted / 2 + 1,
- explain.executionStats.totalDocsExamined);
- // no documents should have been deleted
- assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
- }.bind(this));
- }
- }, $super.states);
+ // the document should not have been deleted.
+ assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
+ }.bind(this));
+ },
+ explainMultiRemove: function explainMultiRemove(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .remove({i: {$lte: this.nInserted / 2}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(this.nInserted / 2 + 1,
+ explain.executionStats.totalDocsExamined);
+ // no documents should have been deleted
+ assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
+ }.bind(this));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
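These states depend on explain of a write being a dry run; the sketch below shows that an explained remove leaves the data untouched (hypothetical collection, outside the workload).

    // Sketch: explaining a remove reports what the plan would do without
    // actually deleting anything.
    var coll = db.explain_remove_demo;  // hypothetical collection
    coll.drop();
    for (var i = 0; i < 10; i++) {
        assert.writeOK(coll.insert({i: i}));
    }

    var res = coll.explain('executionStats').remove({i: 5}, /* justOne */ true);
    assert.commandWorked(res);

    // The matching document is still there, and nothing else was removed.
    assert.eq(1, coll.find({i: 5}).itcount());
    assert.eq(10, coll.find().itcount());
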
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
index f72f06babb5..89876439bc2 100644
--- a/jstests/concurrency/fsm_workloads/explain_update.js
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -5,63 +5,73 @@
*
* Runs explain() and update() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainBasicUpdate: function explainBasicUpdate(db, collName) {
- var res = db[collName].explain('executionStats').update({i: this.nInserted},
- {$set: {j: 49}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, explain.executionStats.totalDocsExamined);
+ $config.states = Object.extend(
+ {
+ explainBasicUpdate: function explainBasicUpdate(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: this.nInserted}, {$set: {j: 49}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, explain.executionStats.totalDocsExamined);
- // document should not have been updated.
- var doc = db[collName].findOne({ i: this.nInserted });
- assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
- }.bind(this));
- },
- explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
- var res = db[collName].explain('executionStats').update({i: 2 * this.nInserted + 1},
- {$set: {j: 81}},
- /* upsert */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // document should not have been updated.
+ var doc = db[collName].findOne({i: this.nInserted});
+ assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
+ }.bind(this));
+ },
+ explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: 2 * this.nInserted + 1},
+ {$set: {j: 81}},
+ /* upsert */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(stage.wouldInsert);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(stage.wouldInsert);
- // make sure that the insert didn't actually happen.
- assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
- },
- explainUpdateMulti: function explainUpdateMulti(db, collName) {
- var res = db[collName].explain('executionStats').update({i: {$lte: 2}}, {$set: {b: 3}},
- /* upsert */ false,
- /* multi */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // make sure that the insert didn't actually happen.
+ assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
+ },
+ explainUpdateMulti: function explainUpdateMulti(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: {$lte: 2}},
+ {$set: {b: 3}},
+ /* upsert */ false,
+ /* multi */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(!stage.wouldInsert);
- assertWhenOwnColl.eq(3, stage.nMatched);
- assertWhenOwnColl.eq(3, stage.nWouldModify);
- }
- }, $super.states);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(!stage.wouldInsert);
+ assertWhenOwnColl.eq(3, stage.nMatched);
+ assertWhenOwnColl.eq(3, stage.nWouldModify);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 2c14791e8d9..cf0a50284ff 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -22,14 +22,13 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
- var res = db.runCommand({
- findAndModify: collName,
- query: { _id: 'findAndModify_inc' },
- update: updateDoc
- });
+ var res = db.runCommand(
+ {findAndModify: collName, query: {_id: 'findAndModify_inc'}, update: updateDoc});
assertAlways.commandWorked(res);
// If the document was invalidated during a yield, then we wouldn't have modified it.
@@ -64,13 +63,13 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
- db[collName].insert({ _id: 'findAndModify_inc' });
+ db[collName].insert({_id: 'findAndModify_inc'});
}
return {
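The update state builds its $inc document dynamically before issuing findAndModify; a trimmed-down standalone version, with a hypothetical field name and _id, is:

    // Sketch: findAndModify with a dynamically constructed $inc specification.
    var demoColl = 'findAndModify_inc_demo';  // hypothetical collection name
    db[demoColl].drop();
    assert.writeOK(db[demoColl].insert({_id: 'counter_doc'}));

    var fieldName = 'counter0';  // stands in for the per-thread field name
    var updateDoc = {$inc: {}};
    updateDoc.$inc[fieldName] = 1;

    var res = db.runCommand(
        {findAndModify: demoColl, query: {_id: 'counter_doc'}, update: updateDoc});
    assert.commandWorked(res);
    assert.eq(1, db[demoColl].findOne({_id: 'counter_doc'})[fieldName]);
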
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index ea89c971ff2..b33e67b2e01 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -9,7 +9,7 @@
var $config = (function() {
var data = {
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = (function() {
@@ -19,14 +19,14 @@ var $config = (function() {
}
function insertAndRemove(db, collName) {
- var res = db[collName].insert({ tid: this.tid, value: this.iter });
+ var res = db[collName].insert({tid: this.tid, value: this.iter});
assertAlways.writeOK(res);
assertAlways.eq(1, res.nInserted);
res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { iter: -1 },
+ query: {tid: this.tid},
+ sort: {iter: -1},
remove: true
});
assertAlways.commandWorked(res);
@@ -50,8 +50,8 @@ var $config = (function() {
})();
var transitions = {
- init: { insertAndRemove: 1 },
- insertAndRemove: { insertAndRemove: 1 }
+ init: {insertAndRemove: 1},
+ insertAndRemove: {insertAndRemove: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index 9df0e8c8636..c08fc5775aa 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -14,17 +14,22 @@ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMong
var $config = (function() {
- var data = {
+ var data = {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
uniqueDBName: 'findAndModify_remove_queue',
newDocForInsert: function newDocForInsert(i) {
- return { _id: i, rand: Random.rand() };
+ return {
+ _id: i,
+ rand: Random.rand()
+ };
},
getIndexSpec: function getIndexSpec() {
- return { rand: 1 };
+ return {
+ rand: 1
+ };
},
opName: 'removed',
@@ -33,10 +38,12 @@ var $config = (function() {
// Use a separate database to avoid conflicts with other FSM workloads.
var ownedDB = db.getSiblingDB(db.getName() + this.uniqueDBName);
- var updateDoc = { $push: {} };
+ var updateDoc = {
+ $push: {}
+ };
updateDoc.$push[this.opName] = id;
- var res = ownedDB[collName].update({ _id: this.tid }, updateDoc, { upsert: true });
+ var res = ownedDB[collName].update({_id: this.tid}, updateDoc, {upsert: true});
assertAlways.writeOK(res);
assertAlways.contains(res.nMatched, [0, 1], tojson(res));
@@ -45,8 +52,7 @@ var $config = (function() {
assertAlways.eq(0, res.nModified, tojson(res));
}
assertAlways.eq(1, res.nUpserted, tojson(res));
- }
- else {
+ } else {
if (ownedDB.getMongo().writeMode() === 'commands') {
assertAlways.eq(1, res.nModified, tojson(res));
}
@@ -61,7 +67,7 @@ var $config = (function() {
var res = db.runCommand({
findAndModify: db[collName].getName(),
query: {},
- sort: { rand: -1 },
+ sort: {rand: -1},
remove: true
});
assertAlways.commandWorked(res);
@@ -86,7 +92,7 @@ var $config = (function() {
})();
var transitions = {
- remove: { remove: 1 }
+ remove: {remove: 1}
};
function setup(db, collName, cluster) {
@@ -98,8 +104,10 @@ var $config = (function() {
var doc = this.newDocForInsert(i);
// Require that documents inserted by this workload use _id values that can be compared
// using the default JS comparator.
- assertAlways.neq(typeof doc._id, 'object', 'default comparator of' +
- ' Array.prototype.sort() is not well-ordered for JS objects');
+ assertAlways.neq(typeof doc._id,
+ 'object',
+ 'default comparator of' +
+ ' Array.prototype.sort() is not well-ordered for JS objects');
bulk.insert(doc);
}
var res = bulk.execute();
@@ -152,9 +160,8 @@ var $config = (function() {
break;
}
- var msg = 'threads ' + tojson(smallest.indices) +
- ' claim to have ' + opName +
- ' a document with _id = ' + tojson(smallest.value);
+ var msg = 'threads ' + tojson(smallest.indices) + ' claim to have ' + opName +
+ ' a document with _id = ' + tojson(smallest.value);
assertWhenOwnColl.eq(1, smallest.indices.length, msg);
indices[smallest.indices[0]]++;
@@ -176,8 +183,7 @@ var $config = (function() {
smallestValueIsSet = true;
smallestValue = value;
smallestIndices = [i];
- }
- else if (value === smallestValue) {
+ } else if (value === smallestValue) {
smallestIndices.push(i);
}
}
@@ -185,7 +191,10 @@ var $config = (function() {
if (!smallestValueIsSet) {
return null;
}
- return { value: smallestValue, indices: smallestIndices };
+ return {
+ value: smallestValue,
+ indices: smallestIndices
+ };
}
}
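The remove state treats the collection as a priority queue keyed on 'rand'; stripped of the bookkeeping, the pop it performs looks roughly like this (hypothetical collection, small fixed seed).

    // Sketch: findAndModify with sort + remove atomically pops the document
    // with the highest 'rand' value and returns it in res.value.
    var coll = db.findAndModify_remove_queue_demo;  // hypothetical collection
    coll.drop();
    for (var i = 0; i < 5; i++) {
        assert.writeOK(coll.insert({_id: i, rand: Math.random()}));
    }

    var res = db.runCommand({
        findAndModify: coll.getName(),
        query: {},
        sort: {rand: -1},
        remove: true
    });
    assert.commandWorked(res);

    // The popped document is gone from the collection.
    assert.eq(null, coll.findOne({_id: res.value._id}));
    assert.eq(4, coll.find().itcount());
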
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index 8d6c8b9b2c7..c794c755ed9 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -11,14 +11,18 @@
var $config = (function() {
var data = {
- numDocsPerThread: 3, // >1 for 'sort' to be meaningful
- shardKey: { tid: 1 }
+ numDocsPerThread: 3, // >1 for 'sort' to be meaningful
+ shardKey: {tid: 1}
};
var states = (function() {
function makeDoc(tid) {
- return { _id: new ObjectId(), tid: tid, value: 0 };
+ return {
+ _id: new ObjectId(),
+ tid: tid,
+ value: 0
+ };
}
function init(db, collName) {
@@ -34,10 +38,9 @@ var $config = (function() {
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { value: 1 },
- update: { $max: { value: updatedValue } },
- new: true
+ query: {tid: this.tid},
+ sort: {value: 1},
+ update: {$max: {value: updatedValue}}, new: true
});
assertAlways.commandWorked(res);
@@ -55,10 +58,9 @@ var $config = (function() {
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { value: -1 },
- update: { $min: { value: updatedValue } },
- new: true
+ query: {tid: this.tid},
+ sort: {value: -1},
+ update: {$min: {value: updatedValue}}, new: true
});
assertAlways.commandWorked(res);
@@ -80,13 +82,13 @@ var $config = (function() {
})();
var transitions = {
- init: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
- findAndModifyAscending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
- findAndModifyDescending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 }
+ init: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5},
+ findAndModifyAscending: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5},
+ findAndModifyDescending: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5}
};
function setup(db, collName, cluster) {
- var res = db[collName].ensureIndex({ tid: 1, value: 1 });
+ var res = db[collName].ensureIndex({tid: 1, value: 1});
assertAlways.commandWorked(res);
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index abaf073288f..ed874f1bd81 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -10,17 +10,19 @@
*
* Attempts to force a collection scan by not creating an index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- // Do not create the { tid: 1, value: 1 } index so that a collection
- // scan is performed for the query and sort operations.
- $config.setup = function setup(db, collName, cluster) { };
+ // Do not create the { tid: 1, value: 1 } index so that a
+ // collection
+ // scan is performed for the query and sort operations.
+ $config.setup = function setup(db, collName, cluster) {};
- // Remove the shardKey so that a collection scan is performed
- delete $config.data.shardKey;
+ // Remove the shardKey so that a collection scan is performed
+ delete $config.data.shardKey;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index b1b0c0add53..277b2882700 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -8,12 +8,12 @@
* a document move by growing the size of the inserted document using
* the $set and $mul update operators.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
var data = {
- shardKey: { tid: 1 },
+ shardKey: {tid: 1},
};
var states = (function() {
@@ -30,7 +30,11 @@ var $config = (function() {
function makeDoc(tid) {
// Use 32-bit integer for representing 'length' property
// to ensure $mul does integer multiplication
- var doc = { _id: new ObjectId(), tid: tid, length: new NumberInt(1) };
+ var doc = {
+ _id: new ObjectId(),
+ tid: tid,
+ length: new NumberInt(1)
+ };
doc[uniqueFieldName] = makeStringOfLength(doc.length);
return doc;
}
@@ -53,11 +57,12 @@ var $config = (function() {
}
// Get the DiskLoc of the document before its potential move
- var before = db[collName].find({ tid: this.tid })
- .showDiskLoc()
- .sort({ length: 1 }) // fetch document of smallest size
- .limit(1)
- .next();
+ var before = db[collName]
+ .find({tid: this.tid})
+ .showDiskLoc()
+ .sort({length: 1}) // fetch document of smallest size
+ .limit(1)
+ .next();
// Increase the length of the 'findAndModify_update_grow' string
// to double the size of the overall document
@@ -65,15 +70,17 @@ var $config = (function() {
var updatedLength = factor * this.length;
var updatedValue = makeStringOfLength(updatedLength);
- var update = { $set: {}, $mul: { length: factor } };
+ var update = {
+ $set: {},
+ $mul: {length: factor}
+ };
update.$set[uniqueFieldName] = updatedValue;
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { length: 1 }, // fetch document of smallest size
- update: update,
- new: true
+ query: {tid: this.tid},
+ sort: {length: 1}, // fetch document of smallest size
+ update: update, new: true
});
assertAlways.commandWorked(res);
@@ -92,14 +99,14 @@ var $config = (function() {
this.bsonsize = Object.bsonsize(doc);
// Get the DiskLoc of the document after its potential move
- var after = db[collName].find({ _id: before._id }).showDiskLoc().next();
+ var after = db[collName].find({_id: before._id}).showDiskLoc().next();
if (isMongod(db) && isMMAPv1(db)) {
// Since the document has at least doubled in size, and the default
// allocation strategy of mmapv1 is to use power of two sizes, the
// document will have always moved
- assertWhenOwnColl.neq(before.$recordId, after.$recordId,
- 'document should have moved');
+ assertWhenOwnColl.neq(
+ before.$recordId, after.$recordId, 'document should have moved');
}
}
@@ -111,8 +118,8 @@ var $config = (function() {
})();
var transitions = {
- insert: { findAndModify: 1 },
- findAndModify: { findAndModify: 1 }
+ insert: {findAndModify: 1},
+ findAndModify: {findAndModify: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
index 0ca53015b68..8ed1a148afa 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -11,64 +11,76 @@
* This workload was designed to reproduce an issue similar to SERVER-18304 for update operations
* using the findAndModify command where the old version of the document is returned.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- // Use the workload name as the database name, since the workload name is assumed to be unique.
- $config.data.uniqueDBName = 'findAndModify_update_queue';
+ // Use the workload name as the database name, since the workload name is assumed to be
+ // unique.
+ $config.data.uniqueDBName = 'findAndModify_update_queue';
- $config.data.newDocForInsert = function newDocForInsert(i) {
- return { _id: i, rand: Random.rand(), counter: 0 };
- };
+ $config.data.newDocForInsert = function newDocForInsert(i) {
+ return {
+ _id: i,
+ rand: Random.rand(),
+ counter: 0
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { counter: 1, rand: -1 };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ counter: 1,
+ rand: -1
+ };
+ };
- $config.data.opName = 'updated';
+ $config.data.opName = 'updated';
- var states = (function() {
+ var states = (function() {
- function update(db, collName) {
- // Update the counter field to avoid matching the same document again.
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: { counter: 0 },
- sort: { rand: -1 },
- update: { $inc: { counter: 1 } },
- new: false
- });
- assertAlways.commandWorked(res);
+ function update(db, collName) {
+ // Update the counter field to avoid matching the same document again.
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {counter: 0},
+ sort: {rand: -1},
+ update: {$inc: {counter: 1}}, new: false
+ });
+ assertAlways.commandWorked(res);
- var doc = res.value;
- if (isMongod(db) && !isMMAPv1(db)) {
- // MMAPv1 does not automatically retry if there was a conflict, so it is expected
- // that it may return null in the case of a conflict. All other storage engines
- // should automatically retry the operation, and thus should never return null.
- assertWhenOwnColl.neq(
- doc, null, 'findAndModify should have found and updated a matching document');
- }
- if (doc !== null) {
- this.saveDocId(db, collName, doc._id);
+ var doc = res.value;
+ if (isMongod(db) && !isMMAPv1(db)) {
+ // MMAPv1 does not automatically retry if there was a conflict, so it is
+ // expected
+ // that it may return null in the case of a conflict. All other storage engines
+ // should automatically retry the operation, and thus should never return null.
+ assertWhenOwnColl.neq(
+ doc,
+ null,
+ 'findAndModify should have found and updated a matching document');
+ }
+ if (doc !== null) {
+ this.saveDocId(db, collName, doc._id);
+ }
}
- }
- return {
- update: update
- };
+ return {
+ update: update
+ };
- })();
+ })();
- var transitions = {
- update: { update: 1 }
- };
+ var transitions = {
+ update: {update: 1}
+ };
- $config.startState = 'update';
- $config.states = states;
- $config.transitions = transitions;
+ $config.startState = 'update';
+ $config.states = states;
+ $config.transitions = transitions;
- return $config;
-});
+ return $config;
+ });
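The update state above reduces to a single findAndModify call. The following is a minimal standalone shell sketch of that shape, where the collection name 'mycoll' is a hypothetical stand-in:

    // Claim the "next" document in the queue: match an unclaimed document,
    // prefer the highest 'rand' value, and bump its counter so it cannot be
    // matched again. new:false returns the pre-image of the document.
    var res = db.runCommand({
        findAndModify: 'mycoll',
        query: {counter: 0},
        sort: {rand: -1},
        update: {$inc: {counter: 1}},
        new: false
    });
    assert.commandWorked(res);
    var claimed = res.value;  // null only if no unclaimed document was found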
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
index a1073106ab0..499e8324cae 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -12,7 +12,7 @@ var $config = (function() {
var data = {
sort: false,
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = (function() {
@@ -41,13 +41,15 @@ var $config = (function() {
var updatedValue = this.iter++;
// Use a query specification that does not match any existing documents
- var query = { _id: new ObjectId(), tid: this.tid };
+ var query = {
+ _id: new ObjectId(),
+ tid: this.tid
+ };
var cmdObj = {
findandmodify: db[collName].getName(),
query: query,
- update: { $setOnInsert: { values: [updatedValue] } },
- new: true,
+ update: {$setOnInsert: {values: [updatedValue]}}, new: true,
upsert: true
};
@@ -74,9 +76,8 @@ var $config = (function() {
var cmdObj = {
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- update: { $push: { values: updatedValue } },
- new: true,
+ query: {tid: this.tid},
+ update: {$push: {values: updatedValue}}, new: true,
upsert: false
};
@@ -111,9 +112,9 @@ var $config = (function() {
})();
var transitions = {
- init: { upsert: 0.1, update: 0.9 },
- upsert: { upsert: 0.1, update: 0.9 },
- update: { upsert: 0.1, update: 0.9 }
+ init: {upsert: 0.1, update: 0.9},
+ upsert: {upsert: 0.1, update: 0.9},
+ update: {upsert: 0.1, update: 0.9}
};
return {
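The two states touched above issue the following pair of findandmodify commands. This is a minimal shell sketch, with 'mycoll' and the thread id value as stand-ins:

    var tid = 0;  // hypothetical thread id
    // upsert state: the query can never match an existing document, so a new
    // document is inserted via $setOnInsert and returned (new: true).
    assert.commandWorked(db.runCommand({
        findandmodify: 'mycoll',
        query: {_id: new ObjectId(), tid: tid},
        update: {$setOnInsert: {values: [0]}},
        new: true,
        upsert: true
    }));
    // update state: grow the 'values' array of a document this thread owns.
    assert.commandWorked(db.runCommand({
        findandmodify: 'mycoll',
        query: {tid: tid},
        update: {$push: {values: 1}},
        new: true,
        upsert: false
    }));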
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
index 200de213235..e9cca5d6d8f 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -10,12 +10,15 @@
*
* Forces 'sort' to perform a collection scan by using $natural.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.sort = { $natural: 1 };
+ $config.data.sort = {
+ $natural: 1
+ };
- return $config;
-});
+ return $config;
+ });
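This file is the smallest instance of the extendWorkload composition that every derived workload in this diff follows. A minimal sketch of the pattern, using the same loads as above:

    load('jstests/concurrency/fsm_libs/extend_workload.js');            // defines extendWorkload
    load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js');  // defines the base $config

    // The callback receives the configuration to extend as $config and the
    // parent's configuration as $super (used to delegate, e.g. $super.states.init),
    // tweaks data/states/transitions, and returns the extended config.
    var $config = extendWorkload($config, function($config, $super) {
        $config.data.sort = {$natural: 1};  // force the findAndModify sort to collection-scan
        return $config;
    });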
diff --git a/jstests/concurrency/fsm_workloads/group.js b/jstests/concurrency/fsm_workloads/group.js
index 34bde848e00..3ccc909e0c9 100644
--- a/jstests/concurrency/fsm_workloads/group.js
+++ b/jstests/concurrency/fsm_workloads/group.js
@@ -18,10 +18,12 @@ var $config = (function() {
return {
group: {
ns: collName,
- initial: { bucketCount: 0, bucketSum: 0},
+ initial: {bucketCount: 0, bucketSum: 0},
$keyf: function $keyf(doc) {
// place doc.rand into appropriate bucket
- return { bucket: Math.floor(doc.rand * 10) + 1 };
+ return {
+ bucket: Math.floor(doc.rand * 10) + 1
+ };
},
$reduce: function $reduce(curr, result) {
result.bucketCount++;
@@ -41,7 +43,7 @@ var $config = (function() {
}, 0);
}
- var data = {
+ var data = {
numDocs: 1000,
generateGroupCmdObj: generateGroupCmdObj,
sumBucketCount: sumBucketCount
@@ -68,13 +70,13 @@ var $config = (function() {
})();
var transitions = {
- group: { group: 1 }
+ group: {group: 1}
};
function setup(db, collName, cluster) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ rand: Random.rand() });
+ bulk.insert({rand: Random.rand()});
}
var res = bulk.execute();
assertAlways.writeOK(res);
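For reference, generateGroupCmdObj above builds a group command of the following shape. This is a minimal standalone sketch against a hypothetical collection 'mycoll' populated with {rand: Random.rand()} documents:

    var cmdObj = {
        group: {
            ns: 'mycoll',
            initial: {bucketCount: 0, bucketSum: 0},
            // place each document into one of ten buckets based on doc.rand in [0, 1)
            $keyf: function(doc) {
                return {bucket: Math.floor(doc.rand * 10) + 1};
            },
            // per-bucket accumulation
            $reduce: function(curr, result) {
                result.bucketCount++;
                result.bucketSum += curr.rand;
            }
        }
    };
    var res = db.runCommand(cmdObj);
    assert.commandWorked(res);
    // res.retval holds one result document per bucket; res.count is the number
    // of documents that were grouped.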
diff --git a/jstests/concurrency/fsm_workloads/group_cond.js b/jstests/concurrency/fsm_workloads/group_cond.js
index 7344b781d31..226b9a9afad 100644
--- a/jstests/concurrency/fsm_workloads/group_cond.js
+++ b/jstests/concurrency/fsm_workloads/group_cond.js
@@ -13,28 +13,32 @@
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/group.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/group.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db[collName].ensureIndex({ rand: 1 }));
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+ assertAlways.commandWorked(db[collName].ensureIndex({rand: 1}));
+ };
- $config.states.group = function group(db, collName) {
- var cmdObj = this.generateGroupCmdObj(collName);
- cmdObj.group.cond = { rand: { $gte: 0.5 } };
- var res = db.runCommand(cmdObj);
- assertWhenOwnColl.commandWorked(res);
+ $config.states.group = function group(db, collName) {
+ var cmdObj = this.generateGroupCmdObj(collName);
+ cmdObj.group.cond = {
+ rand: {$gte: 0.5}
+ };
+ var res = db.runCommand(cmdObj);
+ assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl.lte(res.count, this.numDocs);
- assertWhenOwnColl.lte(res.keys, 5);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.lte(res.retval.length, 5);
- assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
- }.bind(this));
- };
+ assertWhenOwnColl.lte(res.count, this.numDocs);
+ assertWhenOwnColl.lte(res.keys, 5);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.lte(res.retval.length, 5);
+ assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
+ }.bind(this));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index 6df68323dd1..3d90da7470a 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -7,20 +7,21 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a 1-character string based on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_1char';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_1char';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = String.fromCharCode(33 + this.tid);
- };
+ this.indexedValue = String.fromCharCode(33 + this.tid);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
index 2a66590fb31..cdbba38b172 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_1char.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
index 3192aa185cc..c8abb257745 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
@@ -7,49 +7,51 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2d index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2d';
- // Remove the shard key for 2d indexes, as they are not supported
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_2d';
+ // Remove the shard key for 2d indexes, as they are not supported
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
- // split the tid into the odd bits and the even bits
- // for example:
- // tid: 57 = 00111001
- // even: 0 1 0 1 = 5
- // odd: 0 1 1 0 = 6
- // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
- // The pairs are then normalized to have valid longitude and latitude values.
- var oddBits = 0;
- var evenBits = 0;
- for (var i = 0; i < 16; ++i) {
- if (this.tid & 1 << i) {
- if (i % 2 === 0) {
- // i is even
- evenBits |= 1 << (i / 2);
- } else {
- // i is odd
- oddBits |= 1 << (i / 2);
+ assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
+ // split the tid into the odd bits and the even bits
+ // for example:
+ // tid: 57 = 00111001
+ // even: 0 1 0 1 = 5
+ // odd: 0 1 1 0 = 6
+ // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
+ // The pairs are then normalized to have valid longitude and latitude values.
+ var oddBits = 0;
+ var evenBits = 0;
+ for (var i = 0; i < 16; ++i) {
+ if (this.tid & 1 << i) {
+ if (i % 2 === 0) {
+ // i is even
+ evenBits |= 1 << (i / 2);
+ } else {
+ // i is odd
+ oddBits |= 1 << (i / 2);
+ }
}
}
- }
- assertAlways.lt(oddBits, 256);
- assertAlways.lt(evenBits, 256);
- this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
- };
+ assertAlways.lt(oddBits, 256);
+ assertAlways.lt(evenBits, 256);
+ this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2d';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2d';
+ return ixSpec;
+ };
- return $config;
-});
+ return $config;
+ });
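A worked example of the bit splitting in init above, for the tid used in the comment (57 = 00111001 in binary):

    var tid = 57;
    var oddBits = 0;   // built from bit positions 1, 3, 5, ...
    var evenBits = 0;  // built from bit positions 0, 2, 4, ...
    for (var i = 0; i < 16; ++i) {
        if (tid & (1 << i)) {
            if (i % 2 === 0) {
                evenBits |= 1 << (i / 2);
            } else {
                oddBits |= 1 << ((i - 1) / 2);  // same result as the truncating shift above
            }
        }
    }
    // evenBits = 0b101 = 5, oddBits = 0b110 = 6, so the indexed legacy coordinate
    // pair is [(5 - 128) / 2, (6 - 128) / 2] = [-61.5, -61].
    printjson([(evenBits - 128) / 2, (oddBits - 128) / 2]);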
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
index 0271c223049..6c8fd86c104 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
@@ -7,18 +7,19 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2dsphere index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2dsphere';
+ $config.data.indexedField = 'indexed_insert_2dsphere';
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2dsphere';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2dsphere';
+ return ixSpec;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 07e5e287518..59dcab4f0a0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -18,7 +18,7 @@ var $config = (function() {
}
var order = ixSpec[field];
- if (order !== 1 && order !== -1) { // e.g. '2d' or '2dsphere'
+ if (order !== 1 && order !== -1) { // e.g. '2d' or '2dsphere'
order = 1;
}
@@ -43,14 +43,16 @@ var $config = (function() {
find: function find(db, collName) {
// collection scan
- var count = db[collName].find(this.getDoc()).sort({ $natural: 1 }).itcount();
+ var count = db[collName].find(this.getDoc()).sort({$natural: 1}).itcount();
assertWhenOwnColl.eq(count, this.nInserted);
// Use hint() to force an index scan, but only when an appropriate index exists.
// We can only use hint() when the index exists and we know that the collection
// is not being potentially modified by other workloads.
var ownColl = false;
- assertWhenOwnColl(function() { ownColl = true; });
+ assertWhenOwnColl(function() {
+ ownColl = true;
+ });
if (this.indexExists && ownColl) {
count = db[collName].find(this.getDoc()).hint(this.getIndexSpec()).itcount();
assertWhenOwnColl.eq(count, this.nInserted);
@@ -68,9 +70,9 @@ var $config = (function() {
};
var transitions = {
- init: { insert: 1 },
- insert: { find: 1 },
- find: { insert: 1 }
+ init: {insert: 1},
+ insert: {find: 1},
+ find: {insert: 1}
};
function setup(db, collName, cluster) {
@@ -96,7 +98,7 @@ var $config = (function() {
return doc;
},
indexedField: 'x',
- shardKey: { x: 1 },
+ shardKey: {x: 1},
docsPerInsert: 1
},
setup: setup
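The find state above compares a collection scan against a forced index scan. A minimal standalone sketch of that check, assuming a collection 'mycoll' whose documents carry an indexed field 'x':

    var query = {x: 0};  // hypothetical document shape
    // A $natural sort forces a collection scan regardless of available indexes.
    var collScanCount = db.mycoll.find(query).sort({$natural: 1}).itcount();
    // hint() forces the named index, so this count exercises the index entries.
    var ixScanCount = db.mycoll.find(query).hint({x: 1}).itcount();
    assert.eq(collScanCount, ixScanCount);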
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
index a18fd00d9e0..bdffdce6c8c 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
@@ -5,9 +5,9 @@
*
* Executes the indexed_insert_base.js workload on a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
var $config = extendWorkload($config, makeCapped);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
index e58ff22a5b3..aaa3b2e0e07 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_base.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
index e57c7a62c1f..fe9641502b0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
@@ -7,32 +7,34 @@
* appear in both a collection scan and an index scan. The collection is indexed
* with a compound index on three different fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getDoc = function getDoc() {
- return {
- indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
- indexed_insert_compound_y: this.tid >> 4, // high bits
- indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
- };
- };
+ $config.data.getDoc = function getDoc() {
+ return {
+ indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
+ indexed_insert_compound_y: this.tid >> 4, // high bits
+ indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return {
- indexed_insert_compound_x: 1,
- indexed_insert_compound_y: 1,
- indexed_insert_compound_z: 1
- };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ indexed_insert_compound_x: 1,
+ indexed_insert_compound_y: 1,
+ indexed_insert_compound_z: 1
+ };
+ };
- $config.data.shardKey = $config.data.getIndexSpec();
+ $config.data.shardKey = $config.data.getIndexSpec();
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
index 1d877f6d88b..a7a4797efef 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
@@ -7,27 +7,29 @@
* Asserts that all documents appear in both a collection scan and an index
* scan. The indexed value is the thread id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = false;
+ $config.data.nolock = false;
- $config.states.insert = function insert(db, collName) {
- var evalResult = db.runCommand({
- eval: function(collName, doc) {
- var insertResult = db[collName].insert(doc);
- return tojson(insertResult);
- },
- args: [collName, this.getDoc()],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var insertResult = JSON.parse(evalResult.retval);
- assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
- this.nInserted += this.docsPerInsert;
- };
+ $config.states.insert = function insert(db, collName) {
+ var evalResult = db.runCommand({
+ eval: function(collName, doc) {
+ var insertResult = db[collName].insert(doc);
+ return tojson(insertResult);
+ },
+ args: [collName, this.getDoc()],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var insertResult = JSON.parse(evalResult.retval);
+ assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
index d7a28961711..d1d2727c0d7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
@@ -7,12 +7,13 @@
* with the option { nolock: true }. Asserts that all documents appear in both a
* collection scan and an index scan. The indexed value is the thread id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
index 11fee4a4061..c34b986bb7b 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
@@ -7,49 +7,51 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a different BSON type, depending on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- $config.data.indexedField = 'indexed_insert_heterogeneous';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- // prefix str with zeroes to make it have length len
- function pad(len, str) {
- var padding = new Array(len + 1).join('0');
- return (padding + str).slice(-len);
- }
-
- function makeOID(tid) {
- var str = pad(24, tid.toString(16));
- return new ObjectId(str);
- }
-
- function makeDate(tid) {
- var d = new ISODate('2000-01-01T00:00:00.000Z');
- // setSeconds(n) where n >= 60 will just cause the minutes, hours, etc to increase,
- // so this produces a unique date for each tid
- d.setSeconds(tid);
- return d;
- }
-
- var choices = [
- this.tid, // int
- this.tid.toString(), // string
- this.tid * 0.0001, // float
- { tid: this.tid }, // subdocument
- makeOID(this.tid), // objectid
- makeDate(this.tid), // date
- new Function('', 'return ' + this.tid + ';') // function
- ];
-
- this.indexedValue = choices[this.tid % choices.length];
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config,
+ function($config, $super) {
+
+ $config.data.indexedField = 'indexed_insert_heterogeneous';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // prefix str with zeroes to make it have length len
+ function pad(len, str) {
+ var padding = new Array(len + 1).join('0');
+ return (padding + str).slice(-len);
+ }
+
+ function makeOID(tid) {
+ var str = pad(24, tid.toString(16));
+ return new ObjectId(str);
+ }
+
+ function makeDate(tid) {
+ var d = new ISODate('2000-01-01T00:00:00.000Z');
+                                     // setSeconds(n) where n >= 60 will just cause the minutes,
+                                     // hours, etc. to increase, so this produces a unique
+                                     // date for each tid
+ d.setSeconds(tid);
+ return d;
+ }
+
+ var choices = [
+ this.tid, // int
+ this.tid.toString(), // string
+ this.tid * 0.0001, // float
+ {tid: this.tid}, // subdocument
+ makeOID(this.tid), // objectid
+ makeDate(this.tid), // date
+ new Function('', 'return ' + this.tid + ';') // function
+ ];
+
+ this.indexedValue = choices[this.tid % choices.length];
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
index 99a3e080ef5..56aac8ff2ca 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_heterogeneous.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
index b997dda4978..50317368aa6 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
@@ -8,36 +8,41 @@
* value is a string large enough to make the whole index key be 1K, which is
* the maximum.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_large';
+ $config.data.indexedField = 'indexed_insert_large';
- // Remove the shard key, since it cannot be greater than 512 bytes
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be greater than 512 bytes
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- // "The total size of an index entry, which can include structural overhead depending on the
- // BSON type, must be less than 1024 bytes."
- // http://docs.mongodb.org/manual/reference/limits/
- var maxIndexedSize = 1023;
+            // "The total size of an index entry, which can include structural overhead
+            // depending on the BSON type, must be less than 1024 bytes."
+ // http://docs.mongodb.org/manual/reference/limits/
+ var maxIndexedSize = 1023;
- var bsonOverhead = Object.bsonsize({ '': '' });
+ var bsonOverhead = Object.bsonsize({'': ''});
- var bigstr = new Array(maxIndexedSize + 1).join('x');
+ var bigstr = new Array(maxIndexedSize + 1).join('x');
- // prefix the big string with tid to make it unique,
- // then trim it down so that it plus bson overhead is maxIndexedSize
+ // prefix the big string with tid to make it unique,
+ // then trim it down so that it plus bson overhead is maxIndexedSize
- this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
+ this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
- assertAlways.eq(maxIndexedSize, Object.bsonsize({ '': this.indexedValue }),
- 'buggy test: the inserted docs will not have the expected index-key size');
- };
+ assertAlways.eq(
+ maxIndexedSize,
+ Object.bsonsize({'': this.indexedValue}),
+ 'buggy test: the inserted docs will not have the expected index-key size');
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
index 893c1484a71..98c75cab734 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_large.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
index ea92ebd5d0c..47867362aac 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
@@ -7,17 +7,19 @@
* documents appear in both a collection scan and an index scan. The indexed
* field name is a long string.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- // TODO: make this field name even longer?
- var length = 100;
- var prefix = 'indexed_insert_long_fieldname_';
- $config.data.indexedField = prefix + new Array(length - prefix.length + 1).join('x');
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ // TODO: make this field name even longer?
+ var length = 100;
+ var prefix = 'indexed_insert_long_fieldname_';
+ $config.data.indexedField =
+ prefix + new Array(length - prefix.length + 1).join('x');
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
index 74ab2e12786..4466d57efd0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_long_fieldname.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
index 5e80c0ae2cd..bff99ae85c7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
@@ -7,22 +7,24 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is an array of numbers.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_multikey';
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_multikey';
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = [0,1,2,3,4,5,6,7,8,9].map(function(n) {
- return this.tid * 10 + n;
- }.bind(this));
- };
+ this.indexedValue = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].map(function(n) {
+ return this.tid * 10 + n;
+ }.bind(this));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
index 50fd8b8d4e9..9f8e491d2da 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_multikey.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index 1355d2158c9..17ffec0bb40 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -8,31 +8,33 @@
*
* Uses an ordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_ordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_ordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeOrderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeOrderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
-});
+ return $config;
+ });
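The overridden insert state above is an ordered bulk write. A minimal shell sketch, with 'mycoll', the indexed value, and the batch size as stand-ins:

    var doc = {indexed_insert_ordered_bulk: 0};  // hypothetical indexed value
    var docsPerInsert = 15;
    var bulk = db.mycoll.initializeOrderedBulkOp();
    for (var i = 0; i < docsPerInsert; ++i) {
        bulk.insert(doc);  // ordered: stops at the first error, preserving insert order
    }
    var res = bulk.execute();
    assert.writeOK(res);
    assert.eq(docsPerInsert, res.nInserted);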
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index b73373b1090..ab38d07098f 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -28,13 +28,13 @@ var $config = (function() {
if (Array.isArray(snippet)) {
snippet = snippet.join(' ');
}
- assertWhenOwnColl.gt(db[collName].find({ $text: { $search: snippet } }).itcount(), 0);
+ assertWhenOwnColl.gt(db[collName].find({$text: {$search: snippet}}).itcount(), 0);
}
};
var transitions = {
- init: { insert: 1 },
- insert: { insert: 1 }
+ init: {insert: 1},
+ insert: {insert: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
index c372e3d5f51..bacaff869e4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
@@ -5,29 +5,33 @@
*
* like indexed_insert_text.js but the indexed value is an array of strings
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
- var len = Random.randInt(5) + 1; // ensure we always add some text, not just empty array
- var textArr = [];
- for (var i = 0; i < len; ++i) {
- textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
- }
- return textArr;
- };
+ $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
+                       // ensure we always add some text, not just an empty array
+                       var len = Random.randInt(5) + 1;
+ var textArr = [];
+ for (var i = 0; i < len; ++i) {
+ textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
+ }
+ return textArr;
+ };
- // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on Windows DEBUG hosts.
- $config.threadCount = 5;
+ // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on
+ // Windows DEBUG hosts.
+ $config.threadCount = 5;
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index fbdbb9c523e..90aa6d3baf7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -12,27 +12,26 @@ var $config = (function() {
var states = {
init: function init(db, collName) {
- var res = db[collName].insert({ indexed_insert_ttl: new ISODate(), first: true });
+ var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true});
assertAlways.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
},
insert: function insert(db, collName) {
- var res = db[collName].insert({ indexed_insert_ttl: new ISODate() });
+ var res = db[collName].insert({indexed_insert_ttl: new ISODate()});
assertAlways.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
}
};
var transitions = {
- init: { insert: 1 },
- insert: { insert: 1 }
+ init: {insert: 1},
+ insert: {insert: 1}
};
function setup(db, collName, cluster) {
- var res = db[collName].ensureIndex(
- { indexed_insert_ttl: 1 },
- { expireAfterSeconds: this.ttlSeconds });
+ var res = db[collName].ensureIndex({indexed_insert_ttl: 1},
+ {expireAfterSeconds: this.ttlSeconds});
assertAlways.commandWorked(res);
}
@@ -48,7 +47,7 @@ var $config = (function() {
assertWhenOwnColl.soon(function checkTTLCount() {
// All initial documents should be removed by the end of the workload.
- var count = db[collName].find({ first: true }).itcount();
+ var count = db[collName].find({first: true}).itcount();
return count === 0;
}, 'Expected oldest documents with TTL fields to be removed', timeoutMS);
}
@@ -59,10 +58,7 @@ var $config = (function() {
states: states,
transitions: transitions,
setup: setup,
- data: {
- ttlSeconds: 5,
- ttlIndexExists: true
- },
+ data: {ttlSeconds: 5, ttlIndexExists: true},
teardown: teardown
};
})();
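The setup above builds a TTL index. A minimal standalone sketch of the same index and an insert it would expire, using 'mycoll' and the workload's 5-second ttlSeconds value:

    // Documents become eligible for deletion once 'indexed_insert_ttl' is more
    // than expireAfterSeconds old; the TTL monitor removes them on its periodic
    // pass (roughly once a minute by default).
    assert.commandWorked(
        db.mycoll.ensureIndex({indexed_insert_ttl: 1}, {expireAfterSeconds: 5}));
    assert.writeOK(db.mycoll.insert({indexed_insert_ttl: new ISODate(), first: true}));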
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index caf278d6066..f1d00d7cf64 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -8,31 +8,33 @@
*
* Uses an unordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_unordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_unordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
index 1f2fd0adedc..a3d0bd2c8cd 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
@@ -10,35 +10,37 @@
* Instead of inserting via coll.insert(), this workload inserts using an
* upsert.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_upsert';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_upsert';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.counter = 0;
- };
+ this.counter = 0;
+ };
- $config.states.insert = function insert(db, collName) {
- var doc = this.getDoc();
- doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
- doc._id = new ObjectId(); // _id is required for shard targeting
+ $config.states.insert = function insert(db, collName) {
+ var doc = this.getDoc();
+ doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
+ doc._id = new ObjectId(); // _id is required for shard targeting
- var res = db[collName].update(doc, { $inc: { unused: 0 } }, { upsert: true });
- assertAlways.eq(0, res.nMatched, tojson(res));
- assertAlways.eq(1, res.nUpserted, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertAlways.eq(0, res.nModified, tojson(res));
- }
+ var res = db[collName].update(doc, {$inc: {unused: 0}}, {upsert: true});
+ assertAlways.eq(0, res.nMatched, tojson(res));
+ assertAlways.eq(1, res.nUpserted, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertAlways.eq(0, res.nModified, tojson(res));
+ }
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index 055ad1b574c..14408c26f69 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -15,9 +15,11 @@ var $config = (function() {
documentsToInsert: 100,
insertedDocuments: 0,
generateDocumentToInsert: function generateDocumentToInsert() {
- return { tid: this.tid };
+ return {
+ tid: this.tid
+ };
},
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = {
@@ -33,20 +35,21 @@ var $config = (function() {
},
query: function query(db, collName) {
- var count = db[collName].find({ $where: 'this.tid === ' + this.tid }).itcount();
- assertWhenOwnColl.eq(count, this.insertedDocuments,
+ var count = db[collName].find({$where: 'this.tid === ' + this.tid}).itcount();
+ assertWhenOwnColl.eq(count,
+ this.insertedDocuments,
'$where query should return the number of documents this ' +
- 'thread inserted');
+ 'thread inserted');
}
};
var transitions = {
- insert: { insert: 0.2, query: 0.8 },
- query: { insert: 0.8, query: 0.2 }
+ insert: {insert: 0.2, query: 0.8},
+ query: {insert: 0.8, query: 0.2}
};
var setup = function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({tid: 1}));
};
return {
diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js
index 6ab3a8c28b9..6bcdb8ba96c 100644
--- a/jstests/concurrency/fsm_workloads/list_indexes.js
+++ b/jstests/concurrency/fsm_workloads/list_indexes.js
@@ -21,9 +21,8 @@ var $config = (function() {
// List indexes, using a batchSize of 2 to ensure getmores happen.
function listIndices(db, collName) {
- var cursor = new DBCommandCursor(db.getMongo(),
- db.runCommand({listIndexes: collName,
- cursor: {batchSize: 2}}));
+ var cursor = new DBCommandCursor(
+ db.getMongo(), db.runCommand({listIndexes: collName, cursor: {batchSize: 2}}));
assertWhenOwnColl.gte(cursor.itcount(), 0);
}
@@ -34,8 +33,8 @@ var $config = (function() {
})();
var transitions = {
- modifyIndices: { listIndices: 0.75, modifyIndices: 0.25 },
- listIndices: { listIndices: 0.25, modifyIndices: 0.75 }
+ modifyIndices: {listIndices: 0.75, modifyIndices: 0.25},
+ listIndices: {listIndices: 0.25, modifyIndices: 0.75}
};
function setup(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index 9ebdbab9ae3..ef03805dffd 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -60,7 +60,7 @@ var $config = (function() {
// iterations and threads in this workload.
var bulk = mapReduceDB[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ key: Random.randInt(10000) });
+ bulk.insert({key: Random.randInt(10000)});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -74,8 +74,7 @@ var $config = (function() {
try {
mapReduceDB[collName].mapReduce(this.mapper, this.reducer, options);
- }
- catch (e) {
+ } catch (e) {
// Ignore all mapReduce exceptions. This workload is only concerned
// with verifying server availability.
}
@@ -90,9 +89,9 @@ var $config = (function() {
})();
var transitions = {
- dropColl: { mapReduce: 1 },
- dropDB: { mapReduce: 1 },
- mapReduce: { mapReduce: 0.7, dropDB: 0.05, dropColl: 0.25 }
+ dropColl: {mapReduce: 1},
+ dropDB: {mapReduce: 1},
+ mapReduce: {mapReduce: 0.7, dropDB: 0.05, dropColl: 0.25}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 278d9e95f25..1633ce0cc19 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -55,7 +55,7 @@ var $config = (function() {
function mapReduce(db, collName) {
var options = {
finalize: this.finalizer,
- out: { inline: 1 }
+ out: {inline: 1}
};
var res = db[collName].mapReduce(this.mapper, this.reducer, options);
@@ -70,8 +70,8 @@ var $config = (function() {
})();
var transitions = {
- init: { mapReduce: 1 },
- mapReduce: { mapReduce: 1 }
+ init: {mapReduce: 1},
+ mapReduce: {mapReduce: 1}
};
function makeDoc(keyLimit, valueLimit) {
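The mapReduce state above writes nothing to disk and returns its output inline. A minimal standalone sketch, with the map, reduce, and finalize functions and the collection name as hypothetical stand-ins:

    var mapper = function() {
        emit(this.key, 1);  // hypothetical document field
    };
    var reducer = function(key, values) {
        return Array.sum(values);
    };
    var res = db.mycoll.mapReduce(mapper, reducer, {
        finalize: function(key, reduced) { return reduced; },  // pass-through stand-in
        out: {inline: 1}
    });
    assert.commandWorked(res);
    printjson(res.results);  // inline output is returned in the command result itself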
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 7f83a924bce..fd892dc72d9 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -13,53 +13,52 @@
*
* Writes the results of each thread to the same collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- // Use the workload name as the database name,
- // since the workload name is assumed to be unique.
- var uniqueDBName = 'map_reduce_merge';
+ // Use the workload name as the database name,
+ // since the workload name is assumed to be unique.
+ var uniqueDBName = 'map_reduce_merge';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outDBName = db.getName() + uniqueDBName;
- };
+ this.outDBName = db.getName() + uniqueDBName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {
- merge: collName,
- db: this.outDBName
- }
- };
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {merge: collName, db: this.outDBName}
+ };
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- var res = outDB.dropDatabase();
- assertAlways.commandWorked(res);
- assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ var res = outDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
index fd8d2c1136c..49897e20548 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -13,51 +13,49 @@
*
* Specifies nonAtomic=true.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the database name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_merge_nonatomic';
-
- function uniqueDBName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
- var outDB = db.getSiblingDB(this.outDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: {
- merge: collName,
- db: this.outDBName,
- nonAtomic: true
- }
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
- dropDatabases(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the database name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_merge_nonatomic';
+
+ function uniqueDBName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
+ var outDB = db.getSiblingDB(this.outDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {merge: collName, db: this.outDBName, nonAtomic: true}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
+ dropDatabases(db, pattern);
+ };
+
+ return $config;
+ });
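As a rough illustration of the output mode exercised above, a minimal mongo shell sketch of map-reduce merging into a collection in a sibling database with nonAtomic=true. The 'src' and 'mr_out' names are illustrative, not taken from the workload; 'db' is the usual shell handle.

    // Sketch only: merge results into another database's collection without
    // holding the write lock for the whole merge (nonAtomic: true).
    var mapper = function() { emit(this.key, 1); };
    var reducer = function(key, values) { return Array.sum(values); };
    var res = db.src.mapReduce(mapper, reducer, {
        out: {merge: 'mr_out', db: db.getName() + '_mr_out', nonAtomic: true}
    });
    assert.commandWorked(res);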
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 53b1246f4c6..7f6ff6d535f 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -11,45 +11,47 @@
* Uses the "reduce" action to combine the results with the contents
* of the output collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_reduce';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: { reduce: this.outCollName }
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_reduce';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {reduce: this.outCollName}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
index cb0eeb1948a..b566f9db39f 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -14,48 +14,47 @@
* Specifies nonAtomic=true and writes the results of each thread to
* the same collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- // Use the workload name as the collection name,
- // since the workload name is assumed to be unique.
- var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
+ // Use the workload name as the collection name,
+ // since the workload name is assumed to be unique.
+ var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outCollName = uniqueCollectionName;
- };
+ this.outCollName = uniqueCollectionName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {
- reduce: this.outCollName,
- nonAtomic: true
- }
- };
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {reduce: this.outCollName, nonAtomic: true}
+ };
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
- };
+ assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- assertAlways(db[uniqueCollectionName].drop());
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ assertAlways(db[uniqueCollectionName].drop());
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 186caf5a41e..4f22bd225b2 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -11,47 +11,49 @@
* Uses the "replace" action to overwrite the entire contents of the
* collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: { replace: this.outCollName },
- query: { key: { $exists: true }, value: { $exists: true } },
- sort: { _id: -1 } // sort key must be an existing index
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: this.outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}},
+ sort: {_id: -1} // sort key must be an existing index
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+ });
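For comparison with the merge variant, a hedged sketch of the "replace" output action used above, including the query filter and the indexed sort key the workload passes. Collection names are hypothetical.

    // Sketch only: "replace" overwrites the target collection; query restricts the
    // input documents and the sort key (_id here) must be backed by an index.
    var res = db.src.mapReduce(
        function() { emit(this.key, this.value); },
        function(key, values) { return Array.sum(values); },
        {
            out: {replace: 'mr_replace_out'},
            query: {key: {$exists: true}, value: {$exists: true}},
            sort: {_id: -1}
        });
    assert.commandWorked(res);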
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 1cd6e18fbef..3ee8af21409 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -10,41 +10,43 @@
* Uses the "replace" action to write the results to a nonexistent
* output collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace_nonexistent';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outCollName = uniqueCollectionName(prefix, this.tid);
- var fullName = db[outCollName].getFullName();
- assertAlways.isnull(db[outCollName].exists(),
- "output collection '" + fullName + "' should not exist");
-
- var options = {
- finalize: this.finalizer,
- out: { replace: outCollName },
- query: { key: { $exists: true }, value: { $exists: true } }
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace_nonexistent';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outCollName = uniqueCollectionName(prefix, this.tid);
+ var fullName = db[outCollName].getFullName();
+ assertAlways.isnull(db[outCollName].exists(),
+ "output collection '" + fullName + "' should not exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ assertAlways(db[outCollName].drop());
};
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- assertAlways(db[outCollName].drop());
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index f75b7e3c77f..abd1312b7c3 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -23,7 +23,7 @@ var $config = (function() {
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; ++i) {
- bulk.insert({ a: 1, b: Random.rand() });
+ bulk.insert({a: 1, b: Random.rand()});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -31,8 +31,8 @@ var $config = (function() {
// Create two indexes to force plan caching: The {a: 1} index is
// cached by the query planner because we query on a single value
// of 'a' and a range of 'b' values.
- assertAlways.commandWorked(coll.ensureIndex({ a: 1 }));
- assertAlways.commandWorked(coll.ensureIndex({ b: 1 }));
+ assertAlways.commandWorked(coll.ensureIndex({a: 1}));
+ assertAlways.commandWorked(coll.ensureIndex({b: 1}));
}
var states = (function() {
@@ -41,7 +41,7 @@ var $config = (function() {
var coll = db.getSiblingDB(this.dbName)[collName];
var cmdObj = {
- query: { a: 1, b: { $gt: Random.rand() } },
+ query: {a: 1, b: {$gt: Random.rand()}},
limit: Random.randInt(10)
};
@@ -69,8 +69,8 @@ var $config = (function() {
})();
var transitions = {
- count: { count: 0.95, dropDB: 0.05 },
- dropDB: { count: 0.95, dropDB: 0.05 }
+ count: {count: 0.95, dropDB: 0.05},
+ dropDB: {count: 0.95, dropDB: 0.05}
};
function setup(db, collName, cluster) {
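A standalone sketch of the query shape this workload relies on to populate the plan cache: two single-field indexes give the planner competing plans for an equality-on-a plus range-on-b predicate. The 'plans' collection name is illustrative.

    // Sketch only: force plan caching with two candidate indexes, then issue the
    // same shape of count the workload runs.
    var coll = db.plans;
    assert.commandWorked(coll.ensureIndex({a: 1}));
    assert.commandWorked(coll.ensureIndex({b: 1}));
    for (var i = 0; i < 100; ++i) {
        coll.insert({a: 1, b: Math.random()});
    }
    var res = db.runCommand({count: 'plans', query: {a: 1, b: {$gt: 0.5}}, limit: 5});
    assert.commandWorked(res);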
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
index 51aad94c016..7d71e4ea7be 100644
--- a/jstests/concurrency/fsm_workloads/reindex.js
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -7,24 +7,24 @@
* against the collection. Operates on a separate collection for each thread.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
var data = {
- nIndexes: 3 + 1, // 3 created and 1 for _id
+ nIndexes: 3 + 1, // 3 created and 1 for _id
nDocumentsToInsert: 1000,
- maxInteger: 100, // Used for document values. Must be a factor of nDocumentsToInsert
- prefix: 'reindex' // Use filename for prefix because filename is assumed unique
+ maxInteger: 100, // Used for document values. Must be a factor of nDocumentsToInsert
+ prefix: 'reindex' // Use filename for prefix because filename is assumed unique
};
var states = (function() {
function insertDocuments(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToInsert; ++i) {
- bulk.insert({
- text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do' +
- ' eiusmod tempor incididunt ut labore et dolore magna aliqua.',
- geo: { type: 'Point', coordinates: [(i % 50) - 25, (i % 50) - 25] },
+ bulk.insert({
+ text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do' +
+ ' eiusmod tempor incididunt ut labore et dolore magna aliqua.',
+ geo: {type: 'Point', coordinates: [(i % 50) - 25, (i % 50) - 25]},
integer: i % this.maxInteger
});
}
@@ -35,43 +35,51 @@ var $config = (function() {
function createIndexes(db, collName) {
// The number of indexes created here is also stored in data.nIndexes
- var textResult = db[this.threadCollName].ensureIndex({ text: 'text' });
+ var textResult = db[this.threadCollName].ensureIndex({text: 'text'});
assertAlways.commandWorked(textResult);
- var geoResult = db[this.threadCollName].ensureIndex({ geo: '2dsphere' });
+ var geoResult = db[this.threadCollName].ensureIndex({geo: '2dsphere'});
assertAlways.commandWorked(geoResult);
- var integerResult = db[this.threadCollName].ensureIndex({ integer: 1 });
+ var integerResult = db[this.threadCollName].ensureIndex({integer: 1});
assertAlways.commandWorked(integerResult);
}
function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
+ this.threadCollName = this.prefix + '_' + this.tid;
insertDocuments.call(this, db, this.threadCollName);
}
function query(db, collName) {
var coll = db[this.threadCollName];
var nInsertedDocuments = this.nDocumentsToInsert;
- var count = coll.find({ integer: Random.randInt(this.maxInteger) }).itcount();
- assertWhenOwnColl.eq(nInsertedDocuments / this.maxInteger, count, 'number of ' +
- 'documents returned by integer query should match the number ' +
- 'inserted');
-
- var coords = [[ [-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26] ]];
- var geoQuery = { geo: { $geoWithin: { $geometry: { type: 'Polygon',
- coordinates: coords}}}};
+ var count = coll.find({integer: Random.randInt(this.maxInteger)}).itcount();
+ assertWhenOwnColl.eq(
+ nInsertedDocuments / this.maxInteger,
+ count,
+ 'number of ' +
+ 'documents returned by integer query should match the number ' +
+ 'inserted');
+
+ var coords = [[[-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26]]];
+ var geoQuery = {
+ geo: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: coords}}}
+ };
// We can only perform a geo query when we own the collection and are sure a geo index
// is present. The same is true of text queries.
assertWhenOwnColl(function() {
count = coll.find(geoQuery).itcount();
- assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
- ' geospatial query should match number inserted');
-
- count = coll.find({ $text: { $search: 'ipsum' } }).itcount();
- assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
- ' text query should match number inserted');
+ assertWhenOwnColl.eq(count,
+ nInsertedDocuments,
+ 'number of documents returned by' +
+ ' geospatial query should match number inserted');
+
+ count = coll.find({$text: {$search: 'ipsum'}}).itcount();
+ assertWhenOwnColl.eq(count,
+ nInsertedDocuments,
+ 'number of documents returned by' +
+ ' text query should match number inserted');
});
var indexCount = db[this.threadCollName].getIndexes().length;
@@ -92,10 +100,10 @@ var $config = (function() {
})();
var transitions = {
- init: { createIndexes: 1 },
- createIndexes: { reIndex: 0.5, query: 0.5 },
- reIndex: { reIndex: 0.5, query: 0.5 },
- query: { reIndex: 0.5, query: 0.5 }
+ init: {createIndexes: 1},
+ createIndexes: {reIndex: 0.5, query: 0.5},
+ reIndex: {reIndex: 0.5, query: 0.5},
+ query: {reIndex: 0.5, query: 0.5}
};
var teardown = function teardown(db, collName, cluster) {
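A minimal sketch of the index set and queries the reindex workload alternates with reIndex, assuming a shell 'db' handle; 'reindex_demo' and the inserted document are illustrative.

    // Sketch only: text, 2dsphere, and integer indexes, the geo/text queries the
    // workload issues, and the reIndex call itself.
    var coll = db.reindex_demo;
    assert.commandWorked(coll.ensureIndex({text: 'text'}));
    assert.commandWorked(coll.ensureIndex({geo: '2dsphere'}));
    assert.commandWorked(coll.ensureIndex({integer: 1}));
    coll.insert({text: 'lorem ipsum', geo: {type: 'Point', coordinates: [0, 0]}, integer: 7});
    var poly = {type: 'Polygon',
                coordinates: [[[-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26]]]};
    print(coll.find({geo: {$geoWithin: {$geometry: poly}}}).itcount());
    print(coll.find({$text: {$search: 'ipsum'}}).itcount());
    assert.commandWorked(coll.reIndex());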
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
index fe4d00bb9e5..7a5c25679f1 100644
--- a/jstests/concurrency/fsm_workloads/reindex_background.js
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -5,29 +5,31 @@
*
* Bulk inserts 1000 documents and builds indexes in background, then alternates between reindexing
* and querying against the collection. Operates on a separate collection for each thread. Note
- * that because indexes are initially built in the background, reindexing is also done in the
+ * that because indexes are initially built in the background, reindexing is also done in the
* background.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'reindex_background';
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'reindex_background';
- $config.states.createIndexes = function createIndexes(db, collName) {
- var coll = db[this.threadCollName];
+ $config.states.createIndexes = function createIndexes(db, collName) {
+ var coll = db[this.threadCollName];
- // The number of indexes created here is also stored in data.nIndexes
- var textResult = coll.ensureIndex({ text: 'text' }, { background: true });
- assertAlways.commandWorked(textResult);
+ // The number of indexes created here is also stored in data.nIndexes
+ var textResult = coll.ensureIndex({text: 'text'}, {background: true});
+ assertAlways.commandWorked(textResult);
- var geoResult = coll.ensureIndex({ geo: '2dsphere' }, { background: true });
- assertAlways.commandWorked(geoResult);
+ var geoResult = coll.ensureIndex({geo: '2dsphere'}, {background: true});
+ assertAlways.commandWorked(geoResult);
- var integerResult = coll.ensureIndex({ integer: 1 }, {background: true });
- assertAlways.commandWorked(integerResult);
- };
+ var integerResult = coll.ensureIndex({integer: 1}, {background: true});
+ assertAlways.commandWorked(integerResult);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index 74139f07117..a57e61a44b0 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -27,8 +27,8 @@ var $config = (function() {
};
var transitions = {
- insert: { insert: 0.5, remove: 0.5 },
- remove: { insert: 0.5, remove: 0.5 }
+ insert: {insert: 0.5, remove: 0.5},
+ remove: {insert: 0.5, remove: 0.5}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
index c04cb198f50..d809b0be8e3 100644
--- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -13,7 +13,7 @@ var $config = (function() {
init: function init(db, collName) {
this.numDocs = 200;
for (var i = 0; i < this.numDocs; ++i) {
- db[collName].insert({ tid: this.tid, rand: Random.rand() });
+ db[collName].insert({tid: this.tid, rand: Random.rand()});
}
},
@@ -22,28 +22,22 @@ var $config = (function() {
var low = Random.rand();
var high = low + 0.05 * Random.rand();
- var res = db[collName].remove({
- tid: this.tid,
- rand: { $gte: low, $lte: high }
- });
+ var res = db[collName].remove({tid: this.tid, rand: {$gte: low, $lte: high}});
assertAlways.gte(res.nRemoved, 0);
assertAlways.lte(res.nRemoved, this.numDocs);
this.numDocs -= res.nRemoved;
},
count: function count(db, collName) {
- var numDocs = db[collName].find({ tid: this.tid }).itcount();
+ var numDocs = db[collName].find({tid: this.tid}).itcount();
assertWhenOwnColl.eq(this.numDocs, numDocs);
}
};
var transitions = {
- init: { count: 1 },
- count: { remove: 1 },
- remove: {
- remove: 0.825,
- count: 0.125
- }
+ init: {count: 1},
+ count: {remove: 1},
+ remove: {remove: 0.825, count: 0.125}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index 75442919f2d..5f83e0f57f8 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -10,19 +10,13 @@ var $config = (function() {
var states = {
remove: function remove(db, collName) {
// try removing a random document
- var res = this.doRemove(db,
- collName,
- { rand: { $gte: Random.rand() } },
- { justOne: true });
+ var res = this.doRemove(db, collName, {rand: {$gte: Random.rand()}}, {justOne: true});
assertAlways.lte(res.nRemoved, 1);
if (res.nRemoved === 0) {
// The above remove() can fail to remove a document when the random value
// in the query is greater than any of the random values in the collection.
// When that situation occurs, just remove an arbitrary document instead.
- res = this.doRemove(db,
- collName,
- {},
- { justOne: true });
+ res = this.doRemove(db, collName, {}, {justOne: true});
assertAlways.lte(res.nRemoved, 1);
}
this.assertResult(res);
@@ -30,14 +24,14 @@ var $config = (function() {
};
var transitions = {
- remove: { remove: 1 }
+ remove: {remove: 1}
};
function setup(db, collName, cluster) {
// insert enough documents so that each thread can remove exactly one per iteration
var num = this.threadCount * this.iterations;
for (var i = 0; i < num; ++i) {
- db[collName].insert({ i: i, rand: Random.rand() });
+ db[collName].insert({i: i, rand: Random.rand()});
}
assertWhenOwnColl.eq(db[collName].find().itcount(), num);
}
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
index ee6411c9c74..97dca4e242f 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
@@ -5,33 +5,35 @@
*
* Runs remove_single_document using the eval command.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.doRemove = function doRemove(db, collName, query, options) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, options) {
- return tojson(f(db, collName, query, options));
- },
- args: [$super.data.doRemove, collName, query, options],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doRemove = function doRemove(db, collName, query, options) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, options) {
+ return tojson(f(db, collName, query, options));
+ },
+ args: [$super.data.doRemove, collName, query, options],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.assertResult = function assertResult(res) {
- assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
- };
+ $config.data.assertResult = function assertResult(res) {
+ assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- // scale down threadCount and iterations because eval takes a global lock
- $config.threadCount = 5;
- $config.iterations = 10;
+ // scale down threadCount and iterations because eval takes a global lock
+ $config.threadCount = 5;
+ $config.iterations = 10;
- return $config;
-});
+ return $config;
+ });
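For context, a hedged sketch of the eval wrapper pattern the workload above uses: a helper is executed server-side and its result is returned as a JSON string via retval. The eval command is long-deprecated, and 'eval_demo' is a hypothetical collection name.

    // Sketch only: run a function server-side with the eval command and decode
    // the stringified result, mirroring the pattern in the diff.
    var evalResult = db.runCommand({
        eval: function(collName) {
            return tojson({count: db[collName].count()});
        },
        args: ['eval_demo'],
        nolock: false
    });
    assert.commandWorked(evalResult);
    printjson(JSON.parse(evalResult.retval));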
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
index c5aba00523e..1663f808fdb 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
@@ -5,12 +5,13 @@
*
* Runs remove_single_document_eval with the eval option { nolock: true }.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js
index ecfbe722db8..0ef4f3d9931 100644
--- a/jstests/concurrency/fsm_workloads/remove_where.js
+++ b/jstests/concurrency/fsm_workloads/remove_where.js
@@ -8,35 +8,40 @@
* counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound) };
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
- $config.states.remove = function remove(db, collName) {
- var res = db[collName].remove({
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ $config.states.remove = function remove(db, collName) {
+ var res = db[collName].remove({
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
'&& this.tid === ' + this.tid
- });
- assertWhenOwnColl.gte(res.nRemoved, 0);
- assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
- this.insertedDocuments -= res.nRemoved;
- };
+ });
+ assertWhenOwnColl.gte(res.nRemoved, 0);
+ assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
+ this.insertedDocuments -= res.nRemoved;
+ };
- $config.transitions = {
- insert: { insert: 0.2, remove: 0.4, query: 0.4 },
- remove: { insert: 0.4, remove: 0.2, query: 0.4 },
- query: { insert: 0.4, remove: 0.4, query: 0.2 }
- };
+ $config.transitions = {
+ insert: {insert: 0.2, remove: 0.4, query: 0.4},
+ remove: {insert: 0.4, remove: 0.2, query: 0.4},
+ query: {insert: 0.4, remove: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
-});
+ return $config;
+ });
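A short sketch of the $where removal the workload above performs; since $where evaluates server-side JavaScript, it uses plain Math helpers rather than shell-only utilities. The collection name and tid value are illustrative.

    // Sketch only: remove documents matched by a server-side JS predicate.
    var res = db.remove_where_demo.remove({
        $where: 'this.x === Math.floor(Math.random() * 10) && this.tid === 0'
    });
    print('removed ' + res.nRemoved + ' documents');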
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index 7b77eec792b..b02642cb4c2 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -7,7 +7,7 @@
* command against it. The previous "to" namespace is used as the next "from"
* namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -52,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index f9a02412aef..d69bb975d62 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* The previous "to" namespace is used as the next "from" namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -65,8 +65,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index d995ec143b8..06930a0457c 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* Inserts documents into the "to" namespace and specifies dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -88,8 +88,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index b656b004373..11621a0318b 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -7,7 +7,7 @@
* command against it. Inserts documents into the "to" namespace and specifies
* dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -59,8 +59,8 @@ var $config = (function() {
// Verify that 'fromCollCount' documents exist in the "to" collection
// after the rename occurs
- var res = db[this.fromCollName].renameCollection(this.toCollName,
- true /* dropTarget */);
+ var res =
+ db[this.fromCollName].renameCollection(this.toCollName, true /* dropTarget */);
assertWhenOwnDB.commandWorked(res);
assertWhenOwnDB(db[this.toCollName].isCapped());
assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
@@ -80,8 +80,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
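A minimal sketch of renameCollection with dropTarget=true, the operation the rename_*_droptarget workloads above exercise; 'rename_from' and 'rename_to' are illustrative names.

    // Sketch only: dropTarget=true replaces an existing target namespace with the
    // renamed collection's contents.
    assert.commandWorked(db.createCollection('rename_from'));
    assert.commandWorked(db.createCollection('rename_to'));
    db.rename_from.insert({x: 1});
    var res = db.rename_from.renameCollection('rename_to', true /* dropTarget */);
    assert.commandWorked(res);
    assert.eq(1, db.rename_to.find().itcount());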
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
index 0514fe6d075..81c0313e217 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -7,7 +7,7 @@
* command against it. The previous "to" namespace is used as the next "from"
* namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -44,8 +44,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index 505c77d1d5e..d11dfd19d22 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* The previous "to" namespace is used as the next "from" namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -57,8 +57,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index cdc135ba069..453d5a27379 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* Inserts documents into the "to" namespace and specifies dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -80,8 +80,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 161720d019e..bb2651258a5 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -7,7 +7,7 @@
* command against it. Inserts documents into the "to" namespace and specifies
* dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -52,8 +52,8 @@ var $config = (function() {
// Verify that 'fromCollCount' documents exist in the "to" collection
// after the rename occurs
- var res = db[this.fromCollName].renameCollection(this.toCollName,
- true /* dropTarget */);
+ var res =
+ db[this.fromCollName].renameCollection(this.toCollName, true /* dropTarget */);
assertWhenOwnDB.commandWorked(res);
assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
assertWhenOwnDB.eq(0, db[this.fromCollName].find().itcount());
@@ -72,8 +72,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
index 70de8395f49..dbde1420b99 100644
--- a/jstests/concurrency/fsm_workloads/server_status.js
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -24,7 +24,7 @@ var $config = (function() {
};
var transitions = {
- status: { status: 1 }
+ status: {status: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/touch_base.js b/jstests/concurrency/fsm_workloads/touch_base.js
index df2d0851cd5..6e2cce202ed 100644
--- a/jstests/concurrency/fsm_workloads/touch_base.js
+++ b/jstests/concurrency/fsm_workloads/touch_base.js
@@ -4,49 +4,60 @@
* touch_base.js
*
* Bulk inserts documents in batches of 100, uses the touch command on "data" and "index",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
// For isMongod, isMMAPv1, and isEphemeral.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(10) };
- };
-
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: true, index: true };
- };
-
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
- assertAlways.commandWorked(res);
- } else {
- // SERVER-16850 and SERVER-16797
- assertAlways.commandFailed(res);
- }
- };
-
- $config.states.query = function query(db, collName) {
- var count = db[collName].find( { tid: this.tid } ).itcount();
- assertWhenOwnColl.eq(count, this.insertedDocuments,
- 'collection scan should return the number of documents this thread' +
- ' inserted');
- };
-
- $config.transitions = {
- insert: { insert: 0.2, touch: 0.4, query: 0.4 },
- touch: { insert: 0.4, touch: 0.2, query: 0.4 },
- query: { insert: 0.4, touch: 0.4, query: 0.2 }
- };
-
- $config.setup = function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- };
-
- return $config;
-});
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(10)
+ };
+ };
+
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: true,
+ index: true
+ };
+ };
+
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
+ assertAlways.commandWorked(res);
+ } else {
+ // SERVER-16850 and SERVER-16797
+ assertAlways.commandFailed(res);
+ }
+ };
+
+ $config.states.query = function query(db, collName) {
+ var count = db[collName].find({tid: this.tid}).itcount();
+ assertWhenOwnColl.eq(
+ count,
+ this.insertedDocuments,
+ 'collection scan should return the number of documents this thread' +
+ ' inserted');
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, touch: 0.4, query: 0.4},
+ touch: {insert: 0.4, touch: 0.2, query: 0.4},
+ query: {insert: 0.4, touch: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ };
+
+ return $config;
+ });
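For reference, the touch command as issued by touch_base.js, shown as a standalone sketch against a hypothetical collection. As the workload's isMMAPv1/isEphemeral check indicates, the command is only expected to succeed on those storage engines.

    // Sketch only: load data and index extents into memory with the touch command.
    var res = db.runCommand({touch: 'touch_demo', data: true, index: true});
    printjson(res);  // commandWorked on MMAPv1/ephemeral engines, commandFailed elsewhere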
diff --git a/jstests/concurrency/fsm_workloads/touch_data.js b/jstests/concurrency/fsm_workloads/touch_data.js
index 08130dfcf2e..dc3b7cecef0 100644
--- a/jstests/concurrency/fsm_workloads/touch_data.js
+++ b/jstests/concurrency/fsm_workloads/touch_data.js
@@ -4,16 +4,22 @@
* touch_data.js
*
* Bulk inserts documents in batches of 100, uses touch on "data" but not "index",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: true, index: false };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: true,
+ index: false
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/touch_index.js b/jstests/concurrency/fsm_workloads/touch_index.js
index a1cfa6db2ba..cc0b6fcf48d 100644
--- a/jstests/concurrency/fsm_workloads/touch_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_index.js
@@ -4,16 +4,22 @@
* touch_index.js
*
* Bulk inserts documents in batches of 100, uses touch on "index" but not "data",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: false, index: true };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: false,
+ index: true
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
index 18cf0329b02..25ce50fc5ac 100644
--- a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
@@ -4,22 +4,28 @@
* touch_no_data_no_index.js
*
* Bulk inserts documents in batches of 100, uses touch as a no-op,
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: false, index: false };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: false,
+ index: false
+ };
+ };
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- // The command always fails because "index" and "data" are both false
- assertAlways.commandFailed(res);
- };
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ // The command always fails because "index" and "data" are both false
+ assertAlways.commandFailed(res);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index ab2671eb5b1..5d59ff2b0ef 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -22,7 +22,7 @@ var $config = (function() {
},
update: function update(db, collName) {
- var res = db[collName].update({}, { $inc: { n: 1 } }, { multi: true });
+ var res = db[collName].update({}, {$inc: {n: 1}}, {multi: true});
assertAlways.lte(0, res.nMatched, tojson(res));
if (db.getMongo().writeMode() === 'commands') {
assertAlways.eq(res.nMatched, res.nModified, tojson(res));
@@ -32,8 +32,8 @@ var $config = (function() {
};
var transitions = {
- insert: { insert: 0.2, update: 0.8 },
- update: { insert: 0.2, update: 0.8 }
+ insert: {insert: 0.2, update: 0.8},
+ update: {insert: 0.2, update: 0.8}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index b2681fb792a..e275d290911 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -9,7 +9,7 @@
* though other threads in the workload may be modifying the array between the
* update and the find, because thread ids are unique.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -26,8 +26,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.contains(res.nModified, nModifiedPossibilities, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -38,13 +37,13 @@ var $config = (function() {
}
function doPush(db, collName, docIndex, value) {
- var res = db[collName].update({ _id: docIndex }, { $push: { arr: value } });
+ var res = db[collName].update({_id: docIndex}, {$push: {arr: value}});
// assert the update reported success
assertUpdateSuccess(db, res, [1]);
// find the doc and make sure it was updated
- var doc = db[collName].findOne({ _id: docIndex });
+ var doc = db[collName].findOne({_id: docIndex});
assertWhenOwnColl(function() {
assertWhenOwnColl.neq(null, doc);
assertWhenOwnColl(doc.hasOwnProperty('arr'),
@@ -54,21 +53,22 @@ var $config = (function() {
// anything. The $push operator always modifies the matched document, so if we
// matched something, then we must have updated it.
if (res.nMatched > 0) {
- assertWhenOwnColl.contains(value, doc.arr,
+ assertWhenOwnColl.contains(value,
+ doc.arr,
"doc.arr doesn't contain value (" + value +
- ') after $push: ' + tojson(doc.arr));
+ ') after $push: ' + tojson(doc.arr));
}
});
}
function doPull(db, collName, docIndex, value) {
- var res = db[collName].update({ _id: docIndex }, { $pull: { arr: value } });
+ var res = db[collName].update({_id: docIndex}, {$pull: {arr: value}});
// assert the update reported success
assertUpdateSuccess(db, res, [0, 1]);
// find the doc and make sure it was updated
- var doc = db[collName].findOne({ _id: docIndex });
+ var doc = db[collName].findOne({_id: docIndex});
assertWhenOwnColl(function() {
assertWhenOwnColl.neq(null, doc);
@@ -77,9 +77,10 @@ var $config = (function() {
// removed all occurrences of 'value' from the array (meaning that there should be
// none left).
if (res.nMatched > 0) {
- assertWhenOwnColl.eq(-1, doc.arr.indexOf(value),
+ assertWhenOwnColl.eq(-1,
+ doc.arr.indexOf(value),
'doc.arr contains removed value (' + value +
- ') after $pull: ' + tojson(doc.arr));
+ ') after $pull: ' + tojson(doc.arr));
}
});
}
@@ -103,21 +104,15 @@ var $config = (function() {
})();
var transitions = {
- push: {
- push: 0.8,
- pull: 0.2
- },
- pull: {
- push: 0.8,
- pull: 0.2
- }
+ push: {push: 0.8, pull: 0.2},
+ pull: {push: 0.8, pull: 0.2}
};
function setup(db, collName, cluster) {
// index on 'arr', the field being updated
- assertAlways.commandWorked(db[collName].ensureIndex({ arr: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({arr: 1}));
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i, arr: [] });
+ var res = db[collName].insert({_id: i, arr: []});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -129,9 +124,7 @@ var $config = (function() {
startState: 'push',
states: states,
transitions: transitions,
- data: {
- numDocs: 10
- },
+ data: {numDocs: 10},
setup: setup
};
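A compact sketch of the $push/$pull pair update_array.js issues against an indexed array field; the collection name and _id value are illustrative.

    // Sketch only: push a value onto an indexed array, pull it back off, and
    // verify it is gone.
    assert.commandWorked(db.update_array_demo.ensureIndex({arr: 1}));
    db.update_array_demo.insert({_id: 0, arr: []});
    assert.writeOK(db.update_array_demo.update({_id: 0}, {$push: {arr: 5}}));
    assert.writeOK(db.update_array_demo.update({_id: 0}, {$pull: {arr: 5}}));
    assert.eq(-1, db.update_array_demo.findOne({_id: 0}).arr.indexOf(5));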
diff --git a/jstests/concurrency/fsm_workloads/update_array_noindex.js b/jstests/concurrency/fsm_workloads/update_array_noindex.js
index cd1b4c27129..2e99c5a709b 100644
--- a/jstests/concurrency/fsm_workloads/update_array_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_array_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_array.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index a7b71b98848..3e099d6b2a5 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -21,7 +21,7 @@ var $config = (function() {
})();
var transitions = {
- multiUpdate: { multiUpdate: 1.0 }
+ multiUpdate: {multiUpdate: 1.0}
};
function setup(db, collName, cluster) {
@@ -41,16 +41,16 @@ var $config = (function() {
assertWhenOwnColl(function() {
var numIndexKeys = db[collName].find({}, {_id: 0, a: 1}).hint({a: 1}).itcount();
var numDocs = db[collName].find().itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {a: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {a: 1} has wrong number of index keys');
numIndexKeys = db[collName].find({}, {_id: 0, b: 1}).hint({b: 1}).itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {b: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {b: 1} has wrong number of index keys');
numIndexKeys = db[collName].find({}, {_id: 0, c: 1}).hint({c: 1}).itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {c: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {c: 1} has wrong number of index keys');
});
}
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index adc1c536fdd..bd4c832e96f 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -8,7 +8,7 @@
* field. Asserts that the field has the correct value based on the number
* of increments performed.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -25,10 +25,12 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
- var res = db[collName].update({ _id: this.id }, updateDoc);
+ var res = db[collName].update({_id: this.id}, updateDoc);
assertAlways.eq(0, res.nUpserted, tojson(res));
if (isMongod(db) && !isMMAPv1(db)) {
@@ -38,8 +40,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nModified, 1, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -70,13 +71,15 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
- var doc = { _id: this.id };
+ var doc = {
+ _id: this.id
+ };
// Pre-populate the fields we need to avoid size change for capped collections.
for (var i = 0; i < this.threadCount; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/update_inc_capped.js b/jstests/concurrency/fsm_workloads/update_inc_capped.js
index 34c8fbc72b5..19588195f07 100644
--- a/jstests/concurrency/fsm_workloads/update_inc_capped.js
+++ b/jstests/concurrency/fsm_workloads/update_inc_capped.js
@@ -5,8 +5,8 @@
*
* Executes the update_inc.js workload on a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
var $config = extendWorkload($config, makeCapped);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index f04f347d262..af520797ac8 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -7,7 +7,7 @@
* The collection has an index for each field, and a compound index for all fields.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -34,9 +34,15 @@ var $config = (function() {
var push = Random.rand() > 0.2;
var updateDoc = {};
- updateDoc[set ? '$set' : '$unset'] = { x: x };
- updateDoc[push ? '$push' : '$pull'] = { y: y };
- updateDoc.$inc = { z: z };
+ updateDoc[set ? '$set' : '$unset'] = {
+ x: x
+ };
+ updateDoc[push ? '$push' : '$pull'] = {
+ y: y
+ };
+ updateDoc.$inc = {
+ z: z
+ };
return updateDoc;
}
@@ -47,32 +53,29 @@ var $config = (function() {
var updateDoc = makeRandomUpdateDoc();
// apply this update
- var query = makeQuery({
- multi: this.multi,
- isolated: this.isolated,
- numDocs: this.numDocs
- });
- var res = db[collName].update(query, updateDoc, { multi: this.multi });
+ var query =
+ makeQuery({multi: this.multi, isolated: this.isolated, numDocs: this.numDocs});
+ var res = db[collName].update(query, updateDoc, {multi: this.multi});
this.assertResult(res, db, collName, query);
}
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ z: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1, y: 1, z: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({y: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({z: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1, y: 1, z: 1}));
// numDocs should be much less than threadCount, to make more threads use the same docs.
this.numDocs = Math.floor(this.threadCount / 3);
assertAlways.gt(this.numDocs, 0, 'numDocs should be a positive number');
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i });
+ var res = db[collName].insert({_id: i});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -95,8 +98,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nModified, 1, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
index 8c3f6704231..a8debf271e7 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
@@ -6,33 +6,35 @@
* Does updates that affect multiple fields on multiple documents, using $isolated.
* The collection has an index for each field, and a multikey index for all fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.multi = true;
- $config.data.isolated = true;
+ $config.data.multi = true;
+ $config.data.isolated = true;
- $config.data.assertResult = function assertResult(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
- // documents can't move during an update, because we use $isolated
- assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
- }
+ $config.data.assertResult = function assertResult(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ // documents can't move during an update, because we use $isolated
+ assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
+ }
- // every thread only increments z, and z starts at 0,
- // so z should always be strictly greater than 0 after an update,
- // even if other threads modify the doc.
- var docs = db[collName].find().toArray();
- assertWhenOwnColl(function() {
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- });
- };
+ // every thread only increments z, and z starts at 0,
+ // so z should always be strictly greater than 0 after an update,
+ // even if other threads modify the doc.
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl(function() {
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ });
+ };
- return $config;
-});
+ return $config;
+ });
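The $isolated flag referenced in the assertion comments above is attached to the query by the makeQuery helper in update_multifield.js when data.isolated is true. As a point of reference (not taken from this patch; the collection and field names are illustrative only), an isolated multi-update issued directly from the shell looks roughly like:

// $isolated keeps other writers from interleaving with this multi-update once it
// starts modifying documents, so matched documents cannot move mid-operation.
var res = db.example.update({x: {$gte: 0}, $isolated: 1},
                            {$inc: {z: 1}},
                            {multi: true});
assert.writeOK(res);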
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
index 6ac6aeabc89..2f9aeded5ad 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield_isolated_multiupdate.js workload after
* dropping all non-_id indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index 8d9f3d875cc..46532c8db47 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -6,46 +6,48 @@
* Does updates that affect multiple fields on multiple documents.
* The collection has an index for each field, and a multikey index for all fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
-
-var $config = extendWorkload($config, function($config, $super) {
-
- $config.data.multi = true;
-
- $config.data.assertResult = function(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
-
- if (isMongod(db)) {
- if (isMMAPv1(db)) {
- // If an update triggers a document to move forward, then
- // that document can be matched multiple times. If an update
- // triggers a document to move backwards, then that document
- // can be missed by other threads.
- assertAlways.gte(res.nMatched, 0, tojson(res));
- } else { // non-mmapv1 storage engine
- // TODO: Can we assert exact equality with WiredTiger?
- // What about for other storage engines?
- assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
- }
- } else { // mongos
- // In a mixed cluster, it is unknown what underlying storage engine
- // the update operations will be executed against. Thus, we can only
- // make the weakest of all assertions above.
- assertAlways.gte(res.nMatched, 0, tojson(res));
- }
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
- }
-
- var docs = db[collName].find().toArray();
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ $config.data.multi = true;
+
+ $config.data.assertResult = function(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+
+ if (isMongod(db)) {
+ if (isMMAPv1(db)) {
+ // If an update triggers a document to move forward, then
+ // that document can be matched multiple times. If an update
+ // triggers a document to move backwards, then that document
+ // can be missed by other threads.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ } else { // non-mmapv1 storage engine
+ // TODO: Can we assert exact equality with WiredTiger?
+ // What about for other storage engines?
+ assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
+ }
+ } else { // mongos
+ // In a mixed cluster, it is unknown what underlying storage engine
+ // the update operations will be executed against. Thus, we can only
+ // make the weakest of all assertions above.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+
+ var docs = db[collName].find().toArray();
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
index fe12f2e33fb..f2739e329dd 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield_multiupdate.js workload after dropping all
* non-_id indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
index 0be46a25f6a..22b230d7c9e 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index 93911a4a44f..a799d5dfe43 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -10,7 +10,7 @@
*
* Uses an ordered, bulk operation to perform the updates.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMMAPv1 and isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMMAPv1 and isMongod
var $config = (function() {
@@ -20,12 +20,14 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
var bulk = db[collName].initializeOrderedBulkOp();
for (var i = 0; i < this.docCount; ++i) {
- bulk.find({ _id: i }).update(updateDoc);
+ bulk.find({_id: i}).update(updateDoc);
}
var result = bulk.execute();
// TODO: this actually does assume that there are no unique indexes.
@@ -64,15 +66,15 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
this.count = 0;
for (var i = 0; i < this.docCount; ++i) {
- db[collName].insert({ _id: i });
+ db[collName].insert({_id: i});
}
}
@@ -82,9 +84,7 @@ var $config = (function() {
states: states,
transitions: transitions,
setup: setup,
- data: {
- docCount: 15
- }
+ data: {docCount: 15}
};
})();
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index c74a657f312..b163b44f690 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -17,17 +17,23 @@ var $config = (function() {
var states = {
update: function update(db, collName) {
var from = choose(fieldNames);
- var to = choose(fieldNames.filter(function(n) { return n !== from; }));
- var updater = { $rename: {} };
+ var to = choose(fieldNames.filter(function(n) {
+ return n !== from;
+ }));
+ var updater = {
+ $rename: {}
+ };
updater.$rename[from] = to;
var query = {};
- query[from] = { $exists: 1 };
+ query[from] = {
+ $exists: 1
+ };
var res = db[collName].update(query, updater);
assertAlways.eq(0, res.nUpserted, tojson(res));
- assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
+ assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
}
@@ -35,7 +41,7 @@ var $config = (function() {
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/update_rename_noindex.js b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
index bbf19227865..0bcb0cd9145 100644
--- a/jstests/concurrency/fsm_workloads/update_rename_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_rename.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index bc183d01fbb..b9d4cf75380 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -6,7 +6,7 @@
* Does updates that replace an entire document.
* The collection has indexes on some but not all fields.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -34,11 +34,7 @@ var $config = (function() {
// returns an update doc
function getRandomUpdateDoc() {
- var choices = [
- {},
- { x: 1, y: 1, z: 1 },
- { a: 1, b: 1, c: 1 }
- ];
+ var choices = [{}, {x: 1, y: 1, z: 1}, {a: 1, b: 1, c: 1}];
return choices[Random.randInt(choices.length)];
}
@@ -51,30 +47,30 @@ var $config = (function() {
var updateDoc = getRandomUpdateDoc();
// apply the update
- var res = db[collName].update({ _id: docIndex }, updateDoc);
+ var res = db[collName].update({_id: docIndex}, updateDoc);
assertResult(db, res);
}
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ a: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ b: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({a: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({b: 1}));
// no index on c
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({y: 1}));
// no index on z
// numDocs should be much less than threadCount, to make more threads use the same docs.
- this.numDocs = Math.floor(this.threadCount / 3);
+ this.numDocs = Math.floor(this.threadCount / 3);
assertAlways.gt(this.numDocs, 0, 'numDocs should be a positive number');
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i });
+ var res = db[collName].insert({_id: i});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/update_replace_noindex.js b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
index 590326a8edc..a10323fb455 100644
--- a/jstests/concurrency/fsm_workloads/update_replace_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_replace.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index 7e3c00de390..ae694ace309 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -8,7 +8,7 @@
* - whether to $set or $unset its field
* - what value to $set the field to
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -23,19 +23,13 @@ var $config = (function() {
};
var transitions = {
- set: {
- set: 0.5,
- unset: 0.5
- },
- unset: {
- set: 0.5,
- unset: 0.5
- }
+ set: {set: 0.5, unset: 0.5},
+ unset: {set: 0.5, unset: 0.5}
};
function setup(db, collName, cluster) {
// index on 'value', the field being updated
- assertAlways.commandWorked(db[collName].ensureIndex({ value: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({value: 1}));
// numDocs should be much less than threadCount, to make more threads use the same docs.
this.numDocs = Math.floor(this.threadCount / 5);
@@ -44,7 +38,7 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
// make sure the inserted docs have a 'value' field, so they won't need
// to grow when this workload runs against a capped collection
- var res = db[collName].insert({ _id: i, value: 0 });
+ var res = db[collName].insert({_id: i, value: 0});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -65,8 +59,7 @@ var $config = (function() {
// For non-mmap storage engines we can have a strong assertion that exactly one
// doc will be modified.
assertWhenOwnColl.eq(res.nMatched, 1, tojson(res));
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -85,9 +78,13 @@ var $config = (function() {
var value = Random.randInt(5);
var updater = {};
- updater[set ? '$set' : '$unset'] = { value: value };
+ updater[set ? '$set' : '$unset'] = {
+ value: value
+ };
- var query = { _id: docIndex };
+ var query = {
+ _id: docIndex
+ };
var res = this.doUpdate(db, collName, query, updater);
this.assertResult(db, res);
},
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval.js b/jstests/concurrency/fsm_workloads/update_simple_eval.js
index b0f0897a3eb..cf2b10f897a 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval.js
@@ -9,25 +9,27 @@
* - what value to $set the field to
* and then applies the update using db.runCommand({ eval: ... })
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, updater) {
- return tojson(f(db, collName, query, updater));
- },
- args: [$super.data.doUpdate, collName, query, updater],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, updater) {
+ return tojson(f(db, collName, query, updater));
+ },
+ args: [$super.data.doUpdate, collName, query, updater],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
index 0d89e509751..87e24965a7a 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
@@ -5,12 +5,13 @@
*
* Runs update_simple_eval with the eval option { nolock: true }.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_simple_noindex.js b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
index b39c71f4266..65bad2855ab 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_simple.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 63aed616bc3..96d83cb5115 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -16,11 +16,10 @@ var $config = (function() {
var query, update, options;
var res = db[collName].update(
// The counter ensures that the query will not match any existing document.
- query = { tid: this.tid, i: this.counter++ },
- update = { $inc: { n: 1 } },
- options = { multi: true, upsert: true }
- );
- var debugDoc = tojson({ query: query, update: update, options: options, result: res });
+ query = {tid: this.tid, i: this.counter++},
+ update = {$inc: {n: 1}},
+ options = {multi: true, upsert: true});
+ var debugDoc = tojson({query: query, update: update, options: options, result: res});
assertWhenOwnColl.eq(1, res.nUpserted, debugDoc);
assertWhenOwnColl.eq(0, res.nMatched, debugDoc);
if (db.getMongo().writeMode() === 'commands') {
@@ -32,10 +31,9 @@ var $config = (function() {
var res = db[collName].update(
// This query will match an existing document, since the 'insert' state
// always runs first.
- { tid: this.tid },
- { $inc: { n: 1 } },
- { multi: true, upsert: true }
- );
+ {tid: this.tid},
+ {$inc: {n: 1}},
+ {multi: true, upsert: true});
assertWhenOwnColl.eq(0, res.nUpserted, tojson(res));
assertWhenOwnColl.lte(1, res.nMatched, tojson(res));
@@ -53,21 +51,24 @@ var $config = (function() {
// because docs with lower i are newer, so they have had fewer
// opportunities to have n incremented.)
var prevN = Infinity;
- db[collName].find({ tid: this.tid }).sort({ i: 1 }).forEach(function(doc) {
- assertWhenOwnColl.gte(prevN, doc.n);
- prevN = doc.n;
- });
+ db[collName]
+ .find({tid: this.tid})
+ .sort({i: 1})
+ .forEach(function(doc) {
+ assertWhenOwnColl.gte(prevN, doc.n);
+ prevN = doc.n;
+ });
}
};
var transitions = {
- insert: { update: 0.875, assertConsistency: 0.125 },
- update: { insert: 0.875, assertConsistency: 0.125 },
- assertConsistency: { insert: 0.5, update: 0.5 }
+ insert: {update: 0.875, assertConsistency: 0.125},
+ update: {insert: 0.875, assertConsistency: 0.125},
+ assertConsistency: {insert: 0.5, update: 0.5}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1, i: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({tid: 1, i: 1}));
}
return {
@@ -76,7 +77,7 @@ var $config = (function() {
states: states,
startState: 'insert',
transitions: transitions,
- data: { counter: 0 },
+ data: {counter: 0},
setup: setup
};
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
index a463c6ba17d..14b6c02d61f 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_upsert_multi.js workload after dropping all non-_id
* indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index befc8cde972..ac0bb893160 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -3,44 +3,50 @@
/**
* update_where.js
*
- * Bulk inserts documents in batches of 100, randomly selects ~1/10th of documents inserted by the
+ * Bulk inserts documents in batches of 100, randomly selects ~1/10th of documents inserted by the
* thread and updates them. Also queries by the thread that created the documents to verify counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound) };
- };
-
- $config.states.update = function update(db, collName) {
- var res = db[collName].update(
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- { $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
- '&& this.tid === ' + this.tid },
- { $set: { x: Random.randInt(this.randomBound) } },
- { multi: true }
- );
- assertAlways.writeOK(res);
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.gte(res.nModified, 0);
- assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
- }
- };
-
- $config.transitions = {
- insert: { insert: 0.2, update: 0.4, query: 0.4 },
- update: { insert: 0.4, update: 0.2, query: 0.4 },
- query: { insert: 0.4, update: 0.4, query: 0.2 }
- };
-
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
+
+ $config.states.update = function update(db, collName) {
+ var res = db[collName].update(
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ {
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
+ },
+ {$set: {x: Random.randInt(this.randomBound)}},
+ {multi: true});
+ assertAlways.writeOK(res);
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.gte(res.nModified, 0);
+ assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
+ }
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, update: 0.4, query: 0.4},
+ update: {insert: 0.4, update: 0.2, query: 0.4},
+ query: {insert: 0.4, update: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
index e89aa56d184..35430ccfa2f 100644
--- a/jstests/concurrency/fsm_workloads/upsert_where.js
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -6,38 +6,42 @@
* Bulk inserts documents in batches of 100, randomly selects a document that doesn't exist and
* updates it, and queries by the thread that created the documents to verify counts. */
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound)};
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
- $config.states.upsert = function upsert(db, collName) {
- var res = db[collName].update(
- { $where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid },
- { $set: { x: Random.randInt(this.randomBound), tid: this.tid } },
- { upsert: true }
- );
- assertWhenOwnColl.eq(res.nUpserted, 1);
- var upsertedDocument = db[collName].findOne({ _id: res.getUpsertedId()._id });
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
- }.bind(this));
- this.insertedDocuments += res.nUpserted;
- };
+ $config.states.upsert = function upsert(db, collName) {
+ var res = db[collName].update(
+ {$where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid},
+ {$set: {x: Random.randInt(this.randomBound), tid: this.tid}},
+ {upsert: true});
+ assertWhenOwnColl.eq(res.nUpserted, 1);
+ var upsertedDocument = db[collName].findOne({_id: res.getUpsertedId()._id});
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
+ }.bind(this));
+ this.insertedDocuments += res.nUpserted;
+ };
- $config.transitions = {
- insert: { insert: 0.2, upsert: 0.4, query: 0.4 },
- upsert: { insert: 0.4, upsert: 0.2, query: 0.4 },
- query: { insert: 0.4, upsert: 0.4, query: 0.2 }
- };
+ $config.transitions = {
+ insert: {insert: 0.2, upsert: 0.4, query: 0.4},
+ upsert: {insert: 0.4, upsert: 0.2, query: 0.4},
+ query: {insert: 0.4, upsert: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 369db4a0c85..0ef6aa9b1a0 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -1,6 +1,6 @@
'use strict';
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
/**
* yield.js
@@ -31,9 +31,9 @@ var $config = (function() {
doc = cursor.next();
assertAlways(verifier(doc, prevDoc),
'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' +
- 'Query plan: ' + tojson(cursor.explain()) + '\n' +
- 'Previous doc: ' + tojson(prevDoc) + '\n' +
- 'This doc: ' + tojson(doc));
+ 'Query plan: ' + tojson(cursor.explain()) + '\n' +
+ 'Previous doc: ' + tojson(prevDoc) + '\n' +
+ 'This doc: ' + tojson(doc));
}
assertAlways.eq(cursor.itcount(), 0);
},
@@ -44,7 +44,9 @@ var $config = (function() {
*/
genUpdateDoc: function genUpdateDoc() {
var newVal = Random.randInt(this.nDocs);
- return { $set: { a: newVal } };
+ return {
+ $set: {a: newVal}
+ };
}
};
@@ -54,7 +56,7 @@ var $config = (function() {
*/
update: function update(db, collName) {
var id = Random.randInt(this.nDocs);
- var randDoc = db[collName].findOne({ _id: id });
+ var randDoc = db[collName].findOne({_id: id});
if (randDoc === null) {
return;
}
@@ -68,9 +70,9 @@ var $config = (function() {
*/
remove: function remove(db, collName) {
var id = Random.randInt(this.nDocs);
- var doc = db[collName].findOne({ _id: id });
+ var doc = db[collName].findOne({_id: id});
if (doc !== null) {
- var res = db[collName].remove({ _id: id });
+ var res = db[collName].remove({_id: id});
assertAlways.writeOK(res);
if (res.nRemoved > 0) {
assertAlways.writeOK(db[collName].insert(doc));
@@ -84,8 +86,7 @@ var $config = (function() {
*/
query: function collScan(db, collName) {
var nMatches = 100;
- var cursor = db[collName].find({ a: { $lt: nMatches } })
- .batchSize(this.batchSize);
+ var cursor = db[collName].find({a: {$lt: nMatches}}).batchSize(this.batchSize);
var collScanVerifier = function collScanVerifier(doc, prevDoc) {
return doc.a < nMatches;
};
@@ -110,9 +111,9 @@ var $config = (function() {
*
*/
var transitions = {
- update: { update: 0.334, remove: 0.333, query: 0.333 },
- remove: { update: 0.333, remove: 0.334, query: 0.333 },
- query: { update: 0.333, remove: 0.333, query: 0.334 }
+ update: {update: 0.334, remove: 0.333, query: 0.333},
+ remove: {update: 0.333, remove: 0.334, query: 0.333},
+ query: {update: 0.333, remove: 0.333, query: 0.334}
};
/*
@@ -126,18 +127,15 @@ var $config = (function() {
cluster.executeOnMongodNodes(function enableFailPoint(db) {
assertAlways.commandWorked(
- db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', mode: 'alwaysOn' })
- );
+ db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'alwaysOn'}));
});
// Lower the following parameters to force even more yields.
cluster.executeOnMongodNodes(function lowerYieldParams(db) {
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 5 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 5}));
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 1 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 1}));
});
// Set up some data to query.
var N = this.nDocs;
@@ -145,9 +143,8 @@ var $config = (function() {
for (var i = 0; i < N; i++) {
// Give each doc some word of text
var word = this.words[i % this.words.length];
- bulk.find({ _id: i }).upsert().updateOne(
- { $set: { a: i, b: N - i, c: i, d: N - i, yield_text: word } }
- );
+ bulk.find({_id: i}).upsert().updateOne(
+ {$set: {a: i, b: N - i, c: i, d: N - i, yield_text: word}});
}
assertAlways.writeOK(bulk.execute());
}
@@ -158,16 +155,13 @@ var $config = (function() {
function teardown(db, collName, cluster) {
cluster.executeOnMongodNodes(function disableFailPoint(db) {
assertAlways.commandWorked(
- db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', mode: 'off' })
- );
+ db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'off'}));
});
cluster.executeOnMongodNodes(function resetYieldParams(db) {
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 128 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 128}));
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 10 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10}));
});
}
diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
index aa53a354209..d0eef4c8d4f 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
@@ -6,45 +6,70 @@
* Intersperse queries which use the AND_HASH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andHash(db, collName) {
- var nMatches = 100;
- assertAlways.lte(nMatches, this.nDocs);
- // Construct the query plan: two ixscans under an andHashed.
- // Scan c <= nMatches
- var ixscan1 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern: { c: 1 },
- startKey: { '': nMatches }, endKey: {},
- endKeyInclusive: true, direction: -1 } } };
-
- // Scan d >= this.nDocs - nMatches
- var ixscan2 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern: { d: 1 },
- startKey: { '': this.nDocs - nMatches }, endKey: {},
- endKeyInclusive: true, direction: 1 } } };
-
- var andix1ix2 = { andHash: { args: { nodes: [ixscan1, ixscan2] } } };
-
- // On non-MMAP storage engines, index intersection plans will always re-filter
- // the docs to make sure we don't get any spurious matches.
- var fetch = { fetch: { filter: { c: { $lte: nMatches },
- d: { $gte: (this.nDocs - nMatches) } },
- args: { node: andix1ix2 } } };
-
- var res = db.runCommand({ stageDebug: { plan: fetch, collection: collName } });
- assertAlways.commandWorked(res);
- for (var i = 0; i < res.results.length; i++) {
- var result = res.results[i];
- assertAlways.lte(result.c, nMatches);
- assertAlways.gte(result.d, this.nDocs - nMatches);
- }
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andHash(db, collName) {
+ var nMatches = 100;
+ assertAlways.lte(nMatches, this.nDocs);
+ // Construct the query plan: two ixscans under an andHashed.
+ // Scan c <= nMatches
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {c: 1},
+ startKey: {'': nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+ };
+
+ // Scan d >= this.nDocs - nMatches
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs - nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+ };
+
+ var andix1ix2 = {
+ andHash: {args: {nodes: [ixscan1, ixscan2]}}
+ };
+
+ // On non-MMAP storage engines, index intersection plans will always re-filter
+ // the docs to make sure we don't get any spurious matches.
+ var fetch = {
+ fetch: {
+ filter: {c: {$lte: nMatches}, d: {$gte: (this.nDocs - nMatches)}},
+ args: {node: andix1ix2}
+ }
+ };
+
+ var res = db.runCommand({stageDebug: {plan: fetch, collection: collName}});
+ assertAlways.commandWorked(res);
+ for (var i = 0; i < res.results.length; i++) {
+ var result = res.results[i];
+ assertAlways.lte(result.c, nMatches);
+ assertAlways.gte(result.d, this.nDocs - nMatches);
+ }
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
index 11ef5f9b089..42bd94b4acd 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -6,43 +6,65 @@
* Intersperse queries which use the AND_SORTED stage with updates and deletes of documents they
* may match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andSorted(db, collName) {
- // Not very many docs returned in this, so loop to increase chances of yielding in the
- // middle.
- for (var i = 0; i < 100; i++) {
- // Construct the query plan: two ixscans under an andSorted.
- // Scan a == 0
- var ixscan1 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { c: 1 },
- startKey: { '': 0 }, endKey: { '': 0 },
- endKeyInclusive: false, direction: 1 } } };
- // Scan b == this.nDocs
- var ixscan2 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { d: 1 },
- startKey: { '': this.nDocs },
- endKey: { '': this.nDocs },
- endKeyInclusive: false, direction: -1 } } };
+ /*
+ * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andSorted(db, collName) {
+ // Not very many docs returned in this, so loop to increase chances of yielding in the
+ // middle.
+ for (var i = 0; i < 100; i++) {
+ // Construct the query plan: two ixscans under an andSorted.
+ // Scan a == 0
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {c: 1},
+ startKey: {'': 0},
+ endKey: {'': 0},
+ endKeyInclusive: false,
+ direction: 1
+ }
+ }
+ };
+ // Scan b == this.nDocs
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs},
+ endKey: {'': this.nDocs},
+ endKeyInclusive: false,
+ direction: -1
+ }
+ }
+ };
- // Intersect the two
- var andix1ix2 = { andSorted: { args: { nodes: [ixscan1, ixscan2] } } };
- var res = db.runCommand({ stageDebug: { collection: collName, plan: andix1ix2 } });
- assertAlways.commandWorked(res);
- for (var j = 0; j < res.results.length; j++) {
- var result = res.results[j];
- // These should always be true, since they're just verifying that the results match
- // the query predicate.
- assertAlways.eq(result.c, 0);
- assertAlways.eq(result.d, this.nDocs);
+ // Intersect the two
+ var andix1ix2 = {
+ andSorted: {args: {nodes: [ixscan1, ixscan2]}}
+ };
+ var res = db.runCommand({stageDebug: {collection: collName, plan: andix1ix2}});
+ assertAlways.commandWorked(res);
+ for (var j = 0; j < res.results.length; j++) {
+ var result = res.results[j];
+                    // These should always be true, since they're just verifying that the
+                    // results match the query predicate.
+ assertAlways.eq(result.c, 0);
+ assertAlways.eq(result.d, this.nDocs);
+ }
}
- }
- };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
index ddec0529e50..0e1073f774a 100644
--- a/jstests/concurrency/fsm_workloads/yield_fetch.js
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -6,26 +6,27 @@
* Intersperse queries which use the FETCH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Issue a query that will use the FETCH stage.
- */
- $config.states.query = function fetch(db, collName) {
- var nMatches = 100;
+ /*
+ * Issue a query that will use the FETCH stage.
+ */
+ $config.states.query = function fetch(db, collName) {
+ var nMatches = 100;
- var cursor = db[collName].find({ c: { $lt: nMatches } })
- .batchSize(this.batchSize);
+ var cursor = db[collName].find({c: {$lt: nMatches}}).batchSize(this.batchSize);
- var verifier = function fetchVerifier(doc, prevDoc) {
- return doc.c < nMatches;
- };
+ var verifier = function fetchVerifier(doc, prevDoc) {
+ return doc.c < nMatches;
+ };
- this.advanceCursor(cursor, verifier);
- };
+ this.advanceCursor(cursor, verifier);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
index b75c77d77a1..324c384636e 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -5,72 +5,83 @@
*
* Intersperse geo $near queries with updates and deletes of documents they may match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Use geo $near query to find points near the origin. Note this should be done using the
- * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
- * environment. Unfortunately this means we cannot batch the request.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+ /*
+ * Use geo $near query to find points near the origin. Note this should be done using the
+ * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
+ * environment. Unfortunately this means we cannot batch the request.
+ */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand({ geoNear: collName, near: [0, 0], maxDistance: maxDistance });
- assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var prevDoc = { dis: 0 }; // distance should never be less than 0
- for (var i = 0; i < results.length; i++) {
- var doc = results[i];
- assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
- assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
- prevDoc = doc;
- }
- });
- };
+ var res = db.runCommand({geoNear: collName, near: [0, 0], maxDistance: maxDistance});
+ assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var prevDoc = {
+ dis: 0
+ }; // distance should never be less than 0
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i];
+ assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
+ assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
+ prevDoc = doc;
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var P = Math.floor(Math.sqrt(this.nDocs));
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var P = Math.floor(Math.sqrt(this.nDocs));
- // Move the point to another location within the PxP grid.
- var newX = Random.randInt(P) - P/2;
- var newY = Random.randInt(P) - P/2;
- return { $set: { geo: [newX, newY] } };
- };
+ // Move the point to another location within the PxP grid.
+ var newX = Random.randInt(P) - P / 2;
+ var newY = Random.randInt(P) - P / 2;
+ return {
+ $set: {geo: [newX, newY]}
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { geo: '2d' };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ geo: '2d'
+ };
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return { _id: i, geo: coords };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {
+ _id: i,
+ geo: coords
+ };
+ };
- /*
- * Insert some docs in geo form and make a 2d index.
- */
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ /*
+ * Insert some docs in geo form and make a 2d index.
+ */
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var P = Math.floor(Math.sqrt(this.nDocs));
- var i = 0;
- // Set up some points to query (in a PxP grid around 0,0).
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var x = 0; x < P; x++) {
- for (var y = 0; y < P; y++) {
- var coords = [x - P/2, y - P/2];
- bulk.find({ _id: i }).upsert().replaceOne(this.getReplaceSpec(i, coords));
- i++;
+ var P = Math.floor(Math.sqrt(this.nDocs));
+ var i = 0;
+ // Set up some points to query (in a PxP grid around 0,0).
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var x = 0; x < P; x++) {
+ for (var y = 0; y < P; y++) {
+ var coords = [x - P / 2, y - P / 2];
+ bulk.find({_id: i}).upsert().replaceOne(this.getReplaceSpec(i, coords));
+ i++;
+ }
}
- }
- assertAlways.writeOK(bulk.execute());
- assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
- };
+ assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 6e9d70177b7..e2a63f8c546 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -5,79 +5,92 @@
*
* Intersperse geo $near queries with updates of non-geo fields to test deduplication.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states.remove = function remove(db, collName) {
- var id = Random.randInt(this.nDocs);
- var doc = db[collName].findOne({ _id: id });
- if (doc !== null) {
- var res = db[collName].remove({ _id: id });
- assertAlways.writeOK(res);
- if (res.nRemoved > 0) {
- // Re-insert the document with the same '_id', but an incremented 'timesInserted' to
- // distinguish it from the deleted document.
- doc.timesInserted++;
- assertAlways.writeOK(db[collName].insert(doc));
+ $config.states.remove = function remove(db, collName) {
+ var id = Random.randInt(this.nDocs);
+ var doc = db[collName].findOne({_id: id});
+ if (doc !== null) {
+ var res = db[collName].remove({_id: id});
+ assertAlways.writeOK(res);
+ if (res.nRemoved > 0) {
+                    // Re-insert the document with the same '_id', but an
+                    // incremented 'timesInserted' to distinguish it from the
+                    // deleted document.
+ doc.timesInserted++;
+ assertAlways.writeOK(db[collName].insert(doc));
+ }
}
- }
- };
+ };
- /*
- * Use geo $nearSphere query to find points near the origin. Note this should be done using the
- * geoNear command, rather than a $nearSphere query, as the $nearSphere query doesn't work in a
- * sharded environment. Unfortunately this means we cannot batch the request.
- *
- * Only points are covered in this test as there is no guarantee that geometries indexed in
- * multiple cells will be deduplicated correctly with interspersed updates. If multiple index
- * cells for the same geometry occur in the same search interval, an update may cause geoNear
- * to return the same document multiple times.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+        /*
+         * Use geo $nearSphere query to find points near the origin. Note this should be done
+         * using the geoNear command, rather than a $nearSphere query, as the $nearSphere query
+         * doesn't work in a sharded environment. Unfortunately this means we cannot batch the
+         * request.
+         *
+         * Only points are covered in this test as there is no guarantee that geometries indexed
+         * in multiple cells will be deduplicated correctly with interspersed updates. If multiple
+         * index cells for the same geometry occur in the same search interval, an update may
+         * cause geoNear to return the same document multiple times.
+         */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand({
- geoNear: collName,
- near: [0, 0],
- maxDistance: maxDistance,
- spherical: true
- });
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var seenObjs = [];
- for (var i = 0; i < results.length; i++) {
- var doc = results[i].obj;
+ var res = db.runCommand(
+ {geoNear: collName, near: [0, 0], maxDistance: maxDistance, spherical: true});
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var seenObjs = [];
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i].obj;
- // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
- // identifies a document.
- var objToSearchFor = { _id: doc._id, timesInserted: doc.timesInserted };
- var found = seenObjs.some(function(obj) {
- return bsonWoCompare(obj, objToSearchFor) === 0;
- });
- assertWhenOwnColl(!found, 'geoNear command returned the document ' + tojson(doc) +
- ' multiple times: ' + tojson(seenObjs));
- seenObjs.push(objToSearchFor);
- }
- });
- };
+ // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
+ // identifies a document.
+ var objToSearchFor = {
+ _id: doc._id,
+ timesInserted: doc.timesInserted
+ };
+ var found = seenObjs.some(function(obj) {
+ return bsonWoCompare(obj, objToSearchFor) === 0;
+ });
+ assertWhenOwnColl(!found,
+ 'geoNear command returned the document ' + tojson(doc) +
+ ' multiple times: ' + tojson(seenObjs));
+ seenObjs.push(objToSearchFor);
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
- return { $inc: { timesUpdated: 1 } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
+ return {
+ $inc: {timesUpdated: 1}
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { geo: '2dsphere' };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ geo: '2dsphere'
+ };
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return { _id: i, geo: coords, timesUpdated: 0, timesInserted: 0 };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {
+ _id: i,
+ geo: coords,
+ timesUpdated: 0,
+ timesInserted: 0
+ };
+ };
- return $config;
-});
+ return $config;
+ });
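
The dedup check in yield_geo_near_dedup keys each result on the (_id, timesInserted) pair and compares candidates with bsonWoCompare. A small helper in the same spirit is sketched below; the helper name is made up for illustration, and the results array is assumed to have the geoNear command's shape ({dis: ..., obj: ...}).

    // Sketch only: flag duplicate geoNear results. 'assertNoDuplicates' is an
    // illustrative name, not a helper defined by the workload.
    function assertNoDuplicates(results) {
        var seenObjs = [];
        results.forEach(function(result) {
            var doc = result.obj;
            // (_id, timesInserted) distinguishes a re-inserted document from the
            // one that was removed under the same _id.
            var key = {_id: doc._id, timesInserted: doc.timesInserted};
            var isDup = seenObjs.some(function(seen) {
                return bsonWoCompare(seen, key) === 0;
            });
            assert(!isDup, 'duplicate geoNear result: ' + tojson(doc));
            seenObjs.push(key);
        });
    }

    // Usage against a geoNear response; the 5m radius matches the workload's
    // "one doc per square metre" reasoning (PI * 5^2 is roughly 79 points).
    var res = db.runCommand(
        {geoNear: 'some_collection', near: [0, 0], maxDistance: 5, spherical: true});
    if (res.ok) {
        assertNoDuplicates(res.results);
    }
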
diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js
index ef43bbed49b..81a5acbb0fd 100644
--- a/jstests/concurrency/fsm_workloads/yield_id_hack.js
+++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js
@@ -6,26 +6,27 @@
* Intersperse queries which use the ID_HACK stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- /*
- * Issue a query that will use the ID_HACK stage. This cannot be batched, so issue a
- * number of them to increase the chances of yielding between getting the key and looking
- * up its value.
- */
- $config.states.query = function idHack(db, collName) {
- var nQueries = 100;
- for (var i = 0; i < nQueries; i++) {
- assertAlways.lte(db[collName].find({ _id: i }).itcount(), 1);
- var res = db[collName].findOne({ _id: i });
- if (res !== null) {
- assertAlways.eq(i, res._id);
- }
- }
- };
+ /*
+ * Issue a query that will use the ID_HACK stage. This cannot be
+                      * batched, so issue a number of them to increase the chances of
+                      * yielding between getting the key and looking up its value.
+ */
+ $config.states.query = function idHack(db, collName) {
+ var nQueries = 100;
+ for (var i = 0; i < nQueries; i++) {
+ assertAlways.lte(db[collName].find({_id: i}).itcount(), 1);
+ var res = db[collName].findOne({_id: i});
+ if (res !== null) {
+ assertAlways.eq(i, res._id);
+ }
+ }
+ };
- return $config;
-});
+ return $config;
+ });
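
The queries above are exact matches on _id, which the planner serves through the ID_HACK fast path. One way to see that, assuming a server and shell of the era this suite targets, is to inspect the winning plan in explain output; the collection name below is illustrative.

    // Sketch only: confirm that an exact _id match reports the IDHACK stage.
    var coll = db.yield_id_hack_sketch;  // illustrative collection name
    coll.drop();
    assert.writeOK(coll.insert({_id: 1}));

    var winningPlan = coll.find({_id: 1}).explain().queryPlanner.winningPlan;
    assert.eq('IDHACK', winningPlan.stage, tojson(winningPlan));
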
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
index ee742067ff0..4f8415b4fb0 100644
--- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -7,40 +7,44 @@
* match.
* Other workloads that need an index on c and d can inherit from this.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Issue a query with an or stage as the root.
- */
- $config.states.query = function rootedOr(db, collName) {
- var nMatches = 100;
-
- var cursor = db[collName].find({ $or: [ { c: { $lte: nMatches / 2 } },
- { d: { $lte: nMatches / 2 } } ] })
- .batchSize(this.batchSize);
-
- var verifier = function rootedOrVerifier(doc, prevDoc) {
- return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
- };
-
- this.advanceCursor(cursor, verifier);
- };
-
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newC = Random.randInt(this.nDocs);
- var newD = Random.randInt(this.nDocs);
- return { $set: { c: newC, d: newD } };
- };
-
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
-
- assertAlways.commandWorked(db[collName].ensureIndex({ c: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ d: 1 }));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ /*
+ * Issue a query with an or stage as the root.
+ */
+ $config.states.query = function rootedOr(db, collName) {
+ var nMatches = 100;
+
+ var cursor = db[collName].find({
+ $or: [{c: {$lte: nMatches / 2}}, {d: {$lte: nMatches / 2}}]
+ }).batchSize(this.batchSize);
+
+ var verifier = function rootedOrVerifier(doc, prevDoc) {
+ return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
+ };
+
+ this.advanceCursor(cursor, verifier);
+ };
+
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newC = Random.randInt(this.nDocs);
+ var newD = Random.randInt(this.nDocs);
+ return {
+ $set: {c: newC, d: newD}
+ };
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({c: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({d: 1}));
+ };
+
+ return $config;
+ });
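
The rooted $or query above is verified document-by-document as the cursor yields; the same invariant can be checked in isolation, as sketched below with an illustrative collection name and data: every returned document must satisfy at least one $or branch.

    // Sketch only: every result of a rooted $or must match at least one branch.
    var coll = db.yield_rooted_or_sketch;  // illustrative collection name
    coll.drop();
    for (var i = 0; i < 200; i++) {
        assert.writeOK(coll.insert({c: i, d: 200 - i}));
    }
    assert.commandWorked(coll.ensureIndex({c: 1}));
    assert.commandWorked(coll.ensureIndex({d: 1}));

    var nMatches = 100;
    coll.find({$or: [{c: {$lte: nMatches / 2}}, {d: {$lte: nMatches / 2}}]})
        .forEach(function(doc) {
            assert(doc.c <= nMatches / 2 || doc.d <= nMatches / 2, tojson(doc));
        });
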
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
index b3aaed620ff..628314fd36b 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -6,37 +6,40 @@
* Intersperse queries which use the SORT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Execute a query that will use the SORT stage.
- */
- $config.states.query = function sort(db, collName) {
- var nMatches = 100;
- // Sort on c, since it's not an indexed field.
- var cursor = db[collName].find({ a: { $lt: nMatches } })
- .sort({ c: -1 })
- .batchSize(this.batchSize);
+ /*
+ * Execute a query that will use the SORT stage.
+ */
+ $config.states.query = function sort(db, collName) {
+ var nMatches = 100;
+ // Sort on c, since it's not an indexed field.
+ var cursor =
+ db[collName].find({a: {$lt: nMatches}}).sort({c: -1}).batchSize(this.batchSize);
- var verifier = function sortVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.c <= prevDoc.c);
- }
- return doc.a < nMatches && correctOrder;
- };
+ var verifier = function sortVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.c <= prevDoc.c);
+ }
+ return doc.a < nMatches && correctOrder;
+ };
- this.advanceCursor(cursor, verifier);
- };
+ this.advanceCursor(cursor, verifier);
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newC = Random.randInt(this.nDocs);
- return { $set: { a: newA, c: newC } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newC = Random.randInt(this.nDocs);
+ return {
+ $set: {a: newA, c: newC}
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
index cea2a974090..ee63b0d8298 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
@@ -7,50 +7,52 @@
* may match.
* Other workloads that need an index { a: 1, b: 1 } can extend this
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Execute a query that will use the SORT_MERGE stage.
- */
- $config.states.query = function sortMerge(db, collName) {
- var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
-
- // Build an array [0, nMatches).
- var matches = [];
- for (var i = 0; i < nMatches; i++) {
- matches.push(i);
- }
-
- var cursor = db[collName].find({ a: { $in: matches } })
- .sort({ b: -1 })
- .batchSize(this.batchSize);
-
- var verifier = function sortMergeVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.b <= prevDoc.b);
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Execute a query that will use the SORT_MERGE stage.
+ */
+ $config.states.query = function sortMerge(db, collName) {
+ var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
+
+ // Build an array [0, nMatches).
+ var matches = [];
+ for (var i = 0; i < nMatches; i++) {
+ matches.push(i);
}
- return doc.a < nMatches && correctOrder;
- };
- this.advanceCursor(cursor, verifier);
- };
+ var cursor =
+ db[collName].find({a: {$in: matches}}).sort({b: -1}).batchSize(this.batchSize);
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newB = Random.randInt(this.nDocs);
- return { $set: { a: newA, b: newB } };
- };
+ var verifier = function sortMergeVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.b <= prevDoc.b);
+ }
+ return doc.a < nMatches && correctOrder;
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ this.advanceCursor(cursor, verifier);
+ };
- assertAlways.commandWorked(db[collName].ensureIndex({ a: 1, b: 1 }));
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newB = Random.randInt(this.nDocs);
+ return {
+ $set: {a: newA, b: newB}
+ };
+ };
- return $config;
-});
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({a: 1, b: 1}));
+ };
+ return $config;
+ });
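
The comment above notes that SORT_MERGE is only chosen while the $in list stays small. Whether a given plan actually used it can be checked by walking the winning plan from explain output; the recursive helper below is a simplified stand-in for the plan-inspection helpers in jstests/libs/analyze_plan.js, and the collection name and data are illustrative.

    // Sketch only: walk a winningPlan tree looking for a stage name. This is a
    // simplified stand-in for jstests/libs/analyze_plan.js, not workload code.
    function planHasStageSketch(plan, stageName) {
        if (plan.stage === stageName) {
            return true;
        }
        if (plan.inputStage) {
            return planHasStageSketch(plan.inputStage, stageName);
        }
        if (plan.inputStages) {
            return plan.inputStages.some(function(child) {
                return planHasStageSketch(child, stageName);
            });
        }
        return false;
    }

    // With an {a: 1, b: 1} index, a small $in on 'a' plus a sort on 'b' can be
    // answered by merging the per-value index bounds instead of a blocking sort.
    var coll = db.yield_sort_merge_sketch;  // illustrative collection name
    coll.drop();
    for (var i = 0; i < 100; i++) {
        assert.writeOK(coll.insert({a: i % 10, b: i}));
    }
    assert.commandWorked(coll.ensureIndex({a: 1, b: 1}));

    var plan = coll.find({a: {$in: [0, 1, 2]}})
                   .sort({b: -1})
                   .explain()
                   .queryPlanner.winningPlan;
    print('uses SORT_MERGE: ' + planHasStageSketch(plan, 'SORT_MERGE'));
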
diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js
index 33f16e85de9..67d7c618319 100644
--- a/jstests/concurrency/fsm_workloads/yield_text.js
+++ b/jstests/concurrency/fsm_workloads/yield_text.js
@@ -6,43 +6,48 @@
* Intersperse queries which use the TEXT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Pick a random word and search for it using full text search.
- */
- $config.states.query = function text(db, collName) {
- var word = this.words[Random.randInt(this.words.length)];
-
- var cursor = db[collName].find({ $text: { $search: word },
- yield_text: { $exists: true } })
- .batchSize(this.batchSize);
-
- var verifier = function textVerifier(doc, prevDoc) {
- return doc.yield_text.indexOf(word) !== -1;
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Pick a random word and search for it using full text search.
+ */
+ $config.states.query = function text(db, collName) {
+ var word = this.words[Random.randInt(this.words.length)];
+
+ var cursor = db[collName].find({
+ $text: {$search: word},
+ yield_text: {$exists: true}
+ }).batchSize(this.batchSize);
+
+ var verifier = function textVerifier(doc, prevDoc) {
+ return doc.yield_text.indexOf(word) !== -1;
+ };
+
+                // If we don't have the right text index, or someone drops our text index, this
+                // assertion is either pointless or won't work. So only verify the results when
+                // we know no one else is messing with our indices.
+ assertWhenOwnColl(function verifyTextResults() {
+ this.advanceCursor(cursor, verifier);
+ }.bind(this));
};
- // If we don't have the right text index, or someone drops our text index, this assertion
- // is either pointless or won't work. So only verify the results when we know no one else
- // is messing with our indices.
- assertWhenOwnColl(function verifyTextResults() {
- this.advanceCursor(cursor, verifier);
- }.bind(this));
- };
-
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newWord = this.words[Random.randInt(this.words.length)];
- return { $set: { yield_text: newWord } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newWord = this.words[Random.randInt(this.words.length)];
+ return {
+ $set: {yield_text: newWord}
+ };
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- assertWhenOwnColl.commandWorked(db[collName].ensureIndex({ yield_text: 'text' }));
- };
+ assertWhenOwnColl.commandWorked(db[collName].ensureIndex({yield_text: 'text'}));
+ };
- return $config;
-});
+ return $config;
+ });
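
The text workload picks a random word and asserts that it appears in the yield_text field of every document returned by the $text search. A standalone sketch of that check, with an illustrative collection name and word list:

    // Sketch only: every $text result should contain the searched word.
    var coll = db.yield_text_sketch;  // illustrative collection name
    coll.drop();

    var words = ['apple', 'banana', 'cherry'];  // illustrative word list
    words.forEach(function(w, i) {
        assert.writeOK(coll.insert({_id: i, yield_text: w + ' some filler text'}));
    });
    assert.commandWorked(coll.ensureIndex({yield_text: 'text'}));

    Random.setRandomSeed();  // the FSM runner normally seeds this for workloads
    var word = words[Random.randInt(words.length)];
    coll.find({$text: {$search: word}}).forEach(function(doc) {
        assert.neq(-1, doc.yield_text.indexOf(word), tojson(doc));
    });
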