-rw-r--r--  buildscripts/resmokeconfig/suites/no_passthrough.yml                |   2
-rw-r--r--  etc/burn_in_tests.yml                                               |   3
-rw-r--r--  jstests/concurrency/fsm_all_composed.js                             |  26
-rw-r--r--  jstests/concurrency/fsm_all_sharded_replication.js                  |  92
-rw-r--r--  jstests/concurrency/fsm_all_sharded_replication_with_balancer.js    |  98
-rw-r--r--  jstests/concurrency/fsm_all_sharded_with_stepdowns.js               | 163
-rw-r--r--  jstests/concurrency/fsm_all_sharded_with_stepdowns_and_balancer.js  | 171
-rw-r--r--  jstests/concurrency/fsm_all_simultaneous.js                         |  25
-rw-r--r--  jstests/concurrency/fsm_background_workloads/background_base.js     |  49
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js                             | 123
-rw-r--r--  jstests/concurrency/fsm_libs/errors.js                              |  15
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js                      |   1
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js                              | 179
-rw-r--r--  jstests/concurrency/fsm_libs/thread_mgr.js                          |  15
14 files changed, 54 insertions(+), 908 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/no_passthrough.yml b/buildscripts/resmokeconfig/suites/no_passthrough.yml
index 1129ba416cd..71d6af0b5de 100644
--- a/buildscripts/resmokeconfig/suites/no_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/no_passthrough.yml
@@ -7,8 +7,6 @@ selector:
# Self-tests for the Concurrency testing framework are run as part of this test suite.
- jstests/concurrency/*.js
exclude_files:
- # Exclude files that are not self-tests.
- - jstests/concurrency/fsm_all*.js
# Disable inmem_full as per SERVER-27014
- jstests/noPassthrough/inmem_full.js
diff --git a/etc/burn_in_tests.yml b/etc/burn_in_tests.yml
index beec9eb8fb6..216c483dda4 100644
--- a/etc/burn_in_tests.yml
+++ b/etc/burn_in_tests.yml
@@ -7,6 +7,3 @@ selector:
exclude_tasks:
# Exclude list of jstests file names.
exclude_tests:
- # SERVER-27653 - Blacklist concurrency test runners to avoid potential issues from attempting
- # to run them in parallel with each other.
- - jstests/concurrency/fsm_all*.js
diff --git a/jstests/concurrency/fsm_all_composed.js b/jstests/concurrency/fsm_all_composed.js
deleted file mode 100644
index 159ff0919c9..00000000000
--- a/jstests/concurrency/fsm_all_composed.js
+++ /dev/null
@@ -1,26 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads take too long when composed because eval takes a
- // global lock and the composer doesn't honor iteration counts:
- 'remove_single_document_eval.js',
- 'update_simple_eval.js',
-
- // These workloads take too long when composed because server-side JS
- // is slow and the composer doesn't honor iteration counts:
- 'remove_single_document_eval_nolock.js',
- 'update_simple_eval_nolock.js',
-].map(function(file) {
- return dir + '/' + file;
-});
-
-// SERVER-16196 re-enable executing workloads
-// runCompositionOfWorkloads(ls(dir).filter(function(file) {
-// return !Array.contains(blacklist, file);
-// }));
diff --git a/jstests/concurrency/fsm_all_sharded_replication.js b/jstests/concurrency/fsm_all_sharded_replication.js
deleted file mode 100644
index 5313e1a6a04..00000000000
--- a/jstests/concurrency/fsm_all_sharded_replication.js
+++ /dev/null
@@ -1,92 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
-}),
- {sharded: {enabled: true}, replication: {enabled: true}});
diff --git a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js b/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
deleted file mode 100644
index 191e2316075..00000000000
--- a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
+++ /dev/null
@@ -1,98 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
- // concurrent chunk migration may cause duplicate or missing results'
- 'yield_geo_near_dedup.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {sharded: {enabled: true, enableBalancer: true}, replication: {enabled: true}});
diff --git a/jstests/concurrency/fsm_all_sharded_with_stepdowns.js b/jstests/concurrency/fsm_all_sharded_with_stepdowns.js
deleted file mode 100644
index 017db324da3..00000000000
--- a/jstests/concurrency/fsm_all_sharded_with_stepdowns.js
+++ /dev/null
@@ -1,163 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- // ChunkHelper directly talks to the config servers and doesn't support retries for network
- // errors
- 'sharded_base_partitioned.js',
- 'sharded_mergeChunks_partitioned.js',
- 'sharded_moveChunk_drop_shard_key_index.js',
- 'sharded_moveChunk_partitioned.js',
- 'sharded_splitChunk_partitioned.js',
-
- // These workloads frequently time out waiting for the distributed lock to drop a sharded
- // collection.
- 'kill_aggregation.js',
- 'kill_rooted_or.js',
- 'view_catalog_cycle_with_drop.js',
- 'view_catalog.js',
-
- // Use getmores.
- 'agg_base.js',
- 'create_index_background.js',
- 'globally_managed_cursors.js',
- 'indexed_insert_ordered_bulk.js',
- 'indexed_insert_text.js',
- 'indexed_insert_unordered_bulk.js',
- 'indexed_insert_upsert.js',
- 'indexed_insert_where.js',
- 'list_indexes.js',
- 'reindex.js',
- 'reindex_background.js',
- 'remove_multiple_documents.js',
- 'remove_where.js',
- 'touch_base.js',
- 'touch_data.js',
- 'touch_index.js',
- 'touch_no_data_no_index.js',
- 'update_where.js',
- 'yield.js',
- 'yield_fetch.js',
- 'yield_rooted_or.js',
- 'yield_sort.js',
- 'yield_sort_merge.js',
- 'yield_text.js',
-
- // Use non retryable writes.
- 'remove_and_bulk_insert.js',
- 'update_and_bulk_insert.js',
- 'update_check_index.js',
- 'update_multifield_multiupdate.js',
- 'update_multifield_multiupdate_noindex.js',
- 'update_ordered_bulk_inc.js',
- 'yield_geo_near.js',
- 'yield_geo_near_dedup.js',
- 'yield_id_hack.js',
-
- // Use non retryable commands.
- 'agg_out.js',
- 'agg_sort.js',
- 'collmod.js',
- 'collmod_separate_collections.js',
- 'view_catalog.js',
- 'kill_multicollection_aggregation.js', // Uses getMore.
- 'invalidated_cursors.js', // Uses getMore.
-
- // The auto_retry_on_network_error.js override needs to overwrite the response from drop on
- // NamespaceNotFound, and since this workload only creates and drops collections there isn't
- // much value in running it.
- 'drop_collection.js',
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {
- sharded: {enabled: true, stepdownOptions: {configStepdown: true, shardStepdown: true}},
- replication: {enabled: true}
- },
- {sessionOptions: {retryWrites: true}});
diff --git a/jstests/concurrency/fsm_all_sharded_with_stepdowns_and_balancer.js b/jstests/concurrency/fsm_all_sharded_with_stepdowns_and_balancer.js
deleted file mode 100644
index 54c3951f62a..00000000000
--- a/jstests/concurrency/fsm_all_sharded_with_stepdowns_and_balancer.js
+++ /dev/null
@@ -1,171 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
- // concurrent chunk migration may cause duplicate or missing results'
- 'yield_geo_near_dedup.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- // ChunkHelper directly talks to the config servers and doesn't support retries for network
- // errors
- 'sharded_base_partitioned.js',
- 'sharded_mergeChunks_partitioned.js',
- 'sharded_moveChunk_drop_shard_key_index.js',
- 'sharded_moveChunk_partitioned.js',
- 'sharded_splitChunk_partitioned.js',
-
- // These workloads frequently time out waiting for the distributed lock to drop a sharded
- // collection.
- 'kill_aggregation.js',
- 'kill_rooted_or.js',
- 'view_catalog_cycle_with_drop.js',
- 'view_catalog.js',
-
- // Use getmores.
- 'agg_base.js',
- 'create_index_background.js',
- 'globally_managed_cursors.js',
- 'indexed_insert_ordered_bulk.js',
- 'indexed_insert_text.js',
- 'indexed_insert_unordered_bulk.js',
- 'indexed_insert_upsert.js',
- 'indexed_insert_where.js',
- 'list_indexes.js',
- 'reindex.js',
- 'reindex_background.js',
- 'remove_multiple_documents.js',
- 'remove_where.js',
- 'touch_base.js',
- 'touch_data.js',
- 'touch_index.js',
- 'touch_no_data_no_index.js',
- 'update_where.js',
- 'yield.js',
- 'yield_fetch.js',
- 'yield_rooted_or.js',
- 'yield_sort.js',
- 'yield_sort_merge.js',
- 'yield_text.js',
- 'kill_multicollection_aggregation.js',
- 'invalidated_cursors.js',
-
- // Use non retryable writes.
- 'remove_and_bulk_insert.js',
- 'update_and_bulk_insert.js',
- 'update_check_index.js',
- 'update_multifield_multiupdate.js',
- 'update_multifield_multiupdate_noindex.js',
- 'update_ordered_bulk_inc.js',
- 'yield_geo_near.js',
- 'yield_geo_near_dedup.js',
- 'yield_id_hack.js',
-
- // Use non retryable commands.
- 'agg_out.js',
- 'agg_sort.js',
- 'collmod.js',
- 'collmod_separate_collections.js',
- 'view_catalog.js',
-
- // The auto_retry_on_network_error.js override needs to overwrite the response from drop on
- // NamespaceNotFound, and since this workload only creates and drops collections there isn't
- // much value in running it.
- 'drop_collection.js',
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
-}),
- {
- sharded: {
- enabled: true,
- enableBalancer: true,
- stepdownOptions: {configStepdown: true, shardStepdown: true}
- },
- replication: {enabled: true}
- },
- {sessionOptions: {retryWrites: true}});
diff --git a/jstests/concurrency/fsm_all_simultaneous.js b/jstests/concurrency/fsm_all_simultaneous.js
deleted file mode 100644
index 60474cf18d0..00000000000
--- a/jstests/concurrency/fsm_all_simultaneous.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// @tags: [SERVER-32997]
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads implicitly assume that their tid ranges are [0, $config.threadCount). This
- // isn't guaranteed to be true when they are run in parallel with other workloads.
- 'list_indexes.js',
- 'update_inc_capped.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
-
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsInParallel(ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
-}));
diff --git a/jstests/concurrency/fsm_background_workloads/background_base.js b/jstests/concurrency/fsm_background_workloads/background_base.js
deleted file mode 100644
index abcf751e029..00000000000
--- a/jstests/concurrency/fsm_background_workloads/background_base.js
+++ /dev/null
@@ -1,49 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
-
-/**
- * background_base.js
- *
- * This is a base background workload that provides two helpful states that can be
- * used in any derived workloads. It provides a 'wait' state that just waits a specified
- * number of milliseconds and a 'checkForTermination' state that checks to see if the
- * foreground workloads have finished. If they have, the state terminates the workload.
- */
-
-var $config = (function() {
-
- var data = {
- millisecondsToSleep: 4000,
- };
-
- var states = {
- wait: function wait(db, collName) {
- sleep(this.millisecondsToSleep);
- },
-
- checkForTermination: function checkForTermination(db, collName) {
- var coll = db.getSiblingDB('config').fsm_background;
- var numDocs = coll.find({terminate: true}).itcount();
- if (numDocs >= 1) {
- throw new IterationEnd('Background workload was instructed to terminate');
- }
- }
- };
-
- var transitions = {wait: {checkForTermination: 1}, checkForTermination: {wait: 1}};
-
- var teardown = function teardown(db, collName, cluster) {
- db.getSiblingDB('config').fsm_background.drop();
- };
-
- return {
- threadCount: 1,
- iterations: Number.MAX_SAFE_INTEGER,
- data: data,
- states: states,
- startState: 'wait',
- teardown: teardown,
- transitions: transitions
- };
-})();
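The deleted base workload above was designed to be extended. For reference, a derived background workload would have layered onto it through the suite's extendWorkload() helper, roughly as in this sketch (the overrides shown are hypothetical tuning, not a file in the tree):

    // Hedged sketch: extending the removed background_base.js via the FSM
    // extension helper. Everything below the load() calls is illustrative.
    load('jstests/concurrency/fsm_libs/extend_workload.js');  // for extendWorkload
    load('jstests/concurrency/fsm_background_workloads/background_base.js');  // for $config

    var $config = extendWorkload($config, function($config, $super) {
        // Poll faster than the base's 4000ms default (hypothetical tuning).
        $config.data.millisecondsToSleep = 1000;

        // Reuse the base's 'wait' state but log before sleeping.
        $config.states.wait = function wait(db, collName) {
            jsTest.log('background workload still alive');
            $super.states.wait.apply(this, arguments);
        };

        return $config;
    });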
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 73de632ef7f..330507a448d 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -48,7 +48,6 @@ var Cluster = function(options) {
'sharded.stepdownOptions.configStepdown',
'sharded.stepdownOptions.shardStepdown',
'teardownFunctions',
- 'useExistingConnectionAsSeed',
];
getObjectKeys(options).forEach(function(option) {
@@ -167,30 +166,12 @@ var Cluster = function(options) {
'Expected teardownFunctions.config to be an array');
assert(options.teardownFunctions.config.every(f => (typeof f === 'function')),
'Expected teardownFunctions.config to be an array of functions');
-
- options.useExistingConnectionAsSeed = options.useExistingConnectionAsSeed || false;
- assert.eq('boolean', typeof options.useExistingConnectionAsSeed);
- }
-
- function makeReplSetTestConfig(numReplSetNodes, firstNodeOnlyVote) {
- const REPL_SET_VOTING_LIMIT = 7;
- // Workaround for SERVER-26893 to specify when numReplSetNodes > REPL_SET_VOTING_LIMIT.
- var firstNodeNotVoting = firstNodeOnlyVote ? 1 : REPL_SET_VOTING_LIMIT;
- var rstConfig = [];
- for (var i = 0; i < numReplSetNodes; i++) {
- rstConfig[i] = {};
- if (i >= firstNodeNotVoting) {
- rstConfig[i].rsConfig = {priority: 0, votes: 0};
- }
- }
- return rstConfig;
}
var conn;
var secondaryConns;
var st;
- var rawST; // The raw ShardingTest object for test suites not using resmoke fixtures.
var initialized = false;
var clusterStartTime;
@@ -211,86 +192,25 @@ var Cluster = function(options) {
}
if (options.sharded.enabled) {
- if (options.useExistingConnectionAsSeed) {
- st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
- } else {
- // TODO: allow 'options' to specify the number of shards and mongos processes
- var shardConfig = {
- shards: options.sharded.numShards,
- mongos: options.sharded.numMongos,
- verbose: verbosityLevel,
- other: {
- enableAutoSplit: options.sharded.enableAutoSplit,
- enableBalancer: options.sharded.enableBalancer,
- }
- };
-
- // TODO: allow 'options' to specify an 'rs' config
- if (options.replication.enabled) {
- shardConfig.rs = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover
- // during write-heavy workloads
- oplogSize: 1024,
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
- verbose: verbosityLevel
- };
- shardConfig.rsOptions = {};
- }
-
- if (this.shouldPerformContinuousStepdowns()) {
- load('jstests/libs/override_methods/continuous_stepdown.js');
- ContinuousStepdown.configure(options.sharded.stepdownOptions);
- }
-
- rawST = new ShardingTest(shardConfig);
- const hostStr = "mongodb://" + rawST._mongos.map(conn => conn.host).join(",");
-
- st = new FSMShardingTest(hostStr);
- }
+ st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
conn = st.s(0); // First mongos
- this.teardown = function teardown(opts) {
+ this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
options.teardownFunctions.mongos.forEach(this.executeOnMongosNodes);
options.teardownFunctions.config.forEach(this.executeOnConfigNodes);
+ };
- // Skip checking uuids in teardown if performing continuous stepdowns. The override
- // uses cached connections and expects to run commands against primaries, which is
- // not compatible with stepdowns.
- if (this.shouldPerformContinuousStepdowns()) {
- TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
- }
-
- if (!options.useExistingConnectionAsSeed) {
- rawST.stop(opts);
+ this.reestablishConnectionsAfterFailover = function() {
+ // Call getPrimary() to re-establish the connections in FSMShardingTest
+ // as it is not a transparent proxy for ShardingTest.
+ st._configsvr.getPrimary();
+ for (let rst of st._shard_rsts) {
+ rst.getPrimary();
}
};
- if (this.shouldPerformContinuousStepdowns()) {
- this.startContinuousFailover = function() {
- rawST.startContinuousFailover();
- };
-
- this.reestablishConnectionsAfterFailover = function() {
- // Call getPrimary() to re-establish the connections in FSMShardingTest
-                // as it is not a transparent proxy for ShardingTest/rawST.
- st._configsvr.getPrimary();
- for (let rst of st._shard_rsts) {
- rst.getPrimary();
- }
- };
-
- this.stopContinuousFailover = function() {
- rawST.stopContinuousFailover(
- {waitForPrimary: true, waitForMongosRetarget: true});
- this.reestablishConnectionsAfterFailover();
- };
- }
-
// Save all mongos, mongod, and ReplSet connections (if any).
var i;
@@ -312,35 +232,14 @@ var Cluster = function(options) {
}
} else if (options.replication.enabled) {
- var replSetConfig = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover during write-heavy workloads
- oplogSize: 1024,
- nodeOptions: {verbose: verbosityLevel},
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000}
- };
-
- if (!options.useExistingConnectionAsSeed) {
- rst = new ReplSetTest(replSetConfig);
- rst.startSet();
-
- rst.initiate();
- rst.awaitSecondaryNodes();
- } else {
- rst = new ReplSetTest(db.getMongo().host);
- }
+ rst = new ReplSetTest(db.getMongo().host);
conn = rst.getPrimary();
secondaryConns = rst.getSecondaries();
replSets = [rst];
- this.teardown = function teardown(opts) {
+ this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
- if (!options.useExistingConnectionAsSeed) {
- rst.stopSet(undefined, undefined, opts);
- }
};
this._addReplicaSetConns(rst);
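With the self-managed ShardingTest and ReplSetTest paths gone, Cluster always wraps the shell's existing connection, so the whole lifecycle reduces to the sketch below (a minimal sketch assuming the shell is already attached to a resmoke-managed sharded fixture; the option values are illustrative):

    // Sketch: the simplified Cluster lifecycle after this change.
    load('jstests/concurrency/fsm_libs/cluster.js');

    var cluster = new Cluster({
        sharded: {enabled: true},      // seeds FSMShardingTest from db.getMongo().host
        replication: {enabled: true},
    });
    cluster.setup();

    // ... spawn and join workload threads here ...

    // teardown() now only runs the registered teardownFunctions; stopping the
    // fixture itself is resmoke's job, which is why the {noCleanData: ...}
    // teardown options disappear in runner.js below.
    cluster.teardown();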
diff --git a/jstests/concurrency/fsm_libs/errors.js b/jstests/concurrency/fsm_libs/errors.js
deleted file mode 100644
index 1b7706962e1..00000000000
--- a/jstests/concurrency/fsm_libs/errors.js
+++ /dev/null
@@ -1,15 +0,0 @@
-'use strict';
-
-/**
- * errors.js
- *
- * This file defines custom errors.
- */
-
-function IterationEnd(message) {
- this.name = 'IterationEnd';
- this.message = message || 'Iteration instructed to terminate';
- this.stack = (new Error()).stack;
-}
-
-IterationEnd.prototype = Object.create(Error.prototype);
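The removed helper used the standard pre-ES6 pattern of chaining a named error type off Error.prototype. That chaining is what made the string-prefix filtering in runner.js (also removed below) work, since such errors stringify as 'IterationEnd: <message>'. A condensed illustration:

    // Condensed from the deleted file above, plus two illustrative checks.
    function IterationEnd(message) {
        this.name = 'IterationEnd';
        this.message = message || 'Iteration instructed to terminate';
    }
    IterationEnd.prototype = Object.create(Error.prototype);

    var err = new IterationEnd('Background workload was instructed to terminate');
    assert(err instanceof Error);                        // prototype chain intact
    assert(err.toString().startsWith('IterationEnd:'));  // the prefix runner.js matched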
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index 1101bf4cd9f..cbf24e694ed 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -199,7 +199,6 @@
const clusterOptions = {
replication: {enabled: false},
sharded: {enabled: false},
- useExistingConnectionAsSeed: true,
};
const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
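Dropping useExistingConnectionAsSeed works because resmoke_runner.js derives the cluster shape from topology discovery rather than from a flag. A hedged sketch of that flow (DiscoverTopology and the Topology constants come from jstests/libs/discover_topology.js; the branching shown is an assumption based on the surrounding code):

    // Sketch: picking clusterOptions from the discovered topology.
    load('jstests/libs/discover_topology.js');  // for DiscoverTopology, Topology

    const clusterOptions = {
        replication: {enabled: false},
        sharded: {enabled: false},
    };

    const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
    if (topology.type === Topology.kReplicaSet) {
        clusterOptions.replication.enabled = true;
    } else if (topology.type === Topology.kShardedCluster) {
        clusterOptions.sharded.enabled = true;
    }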
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 9a729ba6008..c40d63f556b 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -2,7 +2,6 @@
load('jstests/concurrency/fsm_libs/assert.js');
load('jstests/concurrency/fsm_libs/cluster.js');
-load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
load('jstests/concurrency/fsm_libs/parse_config.js');
load('jstests/concurrency/fsm_libs/thread_mgr.js');
load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and uniqueDBName
@@ -43,7 +42,6 @@ var runner = (function() {
function validateExecutionOptions(mode, options) {
var allowedKeys = [
- 'backgroundWorkloads',
'dbNamePrefix',
'iterationMultiplier',
'sessionOptions',
@@ -91,10 +89,6 @@ var runner = (function() {
assert.lte(options.composeProb, 1);
}
- options.backgroundWorkloads = options.backgroundWorkloads || [];
- assert(Array.isArray(options.backgroundWorkloads),
- 'expected backgroundWorkloads to be an array');
-
if (typeof options.dbNamePrefix !== 'undefined') {
assert.eq(
'string', typeof options.dbNamePrefix, 'expected dbNamePrefix to be a string');
@@ -444,7 +438,7 @@ var runner = (function() {
});
}
- function printWorkloadSchedule(schedule, backgroundWorkloads) {
+ function printWorkloadSchedule(schedule) {
// Print out the entire schedule of workloads to make it easier to run the same
// schedule when debugging test failures.
jsTest.log('The entire schedule of FSM workloads:');
@@ -452,10 +446,6 @@ var runner = (function() {
// Note: We use printjsononeline (instead of just plain printjson) to make it
// easier to reuse the output in variable assignments.
printjsononeline(schedule);
- if (backgroundWorkloads.length > 0) {
- jsTest.log('Background Workloads:');
- printjsononeline(backgroundWorkloads);
- }
jsTest.log('End of schedule');
}
@@ -583,30 +573,18 @@ var runner = (function() {
tojson(session.getOperationTime());
}
- if (cluster.shouldPerformContinuousStepdowns()) {
- cluster.startContinuousFailover();
- }
-
try {
- try {
- // Start this set of foreground workload threads.
- threadMgr.spawnAll(cluster, executionOptions);
- // Allow 20% of foreground threads to fail. This allows the workloads to run on
- // underpowered test hosts.
- threadMgr.checkFailed(0.2);
- } finally {
- // Threads must be joined before destruction, so do this
- // even in the presence of exceptions.
- errors.push(...threadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
- }
+ // Start this set of foreground workload threads.
+ threadMgr.spawnAll(cluster, executionOptions);
+ // Allow 20% of foreground threads to fail. This allows the workloads to run on
+ // underpowered test hosts.
+ threadMgr.checkFailed(0.2);
} finally {
- if (cluster.shouldPerformContinuousStepdowns()) {
- // Suspend the stepdown threads prior to calling cleanupWorkload() to avoid
- // causing a failover to happen while the data consistency checks are running.
- cluster.stopContinuousFailover();
- }
+ // Threads must be joined before destruction, so do this
+ // even in the presence of exceptions.
+ errors.push(...threadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
} finally {
// Call each foreground workload's teardown function. After all teardowns have completed
@@ -676,11 +654,6 @@ var runner = (function() {
loadWorkloadContext(workloads, context, executionOptions, true /* applyMultipliers */);
var threadMgr = new ThreadManager(clusterOptions, executionMode);
- var bgContext = {};
- var bgWorkloads = executionOptions.backgroundWorkloads;
- loadWorkloadContext(bgWorkloads, bgContext, executionOptions, false /* applyMultipliers */);
- var bgThreadMgr = new ThreadManager(clusterOptions);
-
var cluster = new Cluster(clusterOptions);
if (cluster.isSharded()) {
useDropDistLockFailPoint(cluster, clusterOptions);
@@ -688,8 +661,6 @@ var runner = (function() {
cluster.setup();
// Filter out workloads that need to be skipped.
- bgWorkloads =
- bgWorkloads.filter(workload => !shouldSkipWorkload(workload, bgContext, cluster));
workloads = workloads.filter(workload => !shouldSkipWorkload(workload, context, cluster));
// Clean up the state left behind by other tests in the concurrency suite
@@ -711,112 +682,46 @@ var runner = (function() {
var maxAllowedThreads = 100 * executionOptions.threadMultiplier;
Random.setRandomSeed(clusterOptions.seed);
- var bgCleanup = [];
var errors = [];
var configServerData = [];
- let activeException = false;
try {
- prepareCollections(bgWorkloads, bgContext, cluster, clusterOptions, executionOptions);
-
- // Set up the background thread manager for background workloads.
- bgThreadMgr.init(bgWorkloads, bgContext, maxAllowedThreads);
-
- // Call each background workload's setup function.
- bgWorkloads.forEach(function(bgWorkload) {
- // Define "iterations" and "threadCount" properties on the background workload's
- // $config.data object so that they can be used within its setup(), teardown(), and
- // state functions. This must happen after calling bgThreadMgr.init() in case the
- // thread counts needed to be scaled down.
- setIterations(bgContext[bgWorkload].config);
- setThreadCount(bgContext[bgWorkload].config);
+ var schedule = scheduleWorkloads(workloads, executionMode, executionOptions);
+ printWorkloadSchedule(schedule);
+
+ schedule.forEach(function(workloads) {
+ // Make a deep copy of the $config object for each of the workloads that are
+ // going to be run to ensure the workload starts with a fresh version of its
+ // $config.data. This is necessary because $config.data keeps track of
+ // thread-local state that may be updated during a workload's setup(),
+ // teardown(), and state functions.
+ var groupContext = {};
+ workloads.forEach(function(workload) {
+ groupContext[workload] = Object.extend({}, context[workload], true);
+ });
- setupWorkload(bgWorkload, bgContext, cluster);
- bgCleanup.push(bgWorkload);
+ // Run the next group of workloads in the schedule.
+ runWorkloadGroup(threadMgr,
+ workloads,
+ groupContext,
+ cluster,
+ clusterOptions,
+ executionMode,
+ executionOptions,
+ errors,
+ maxAllowedThreads,
+ dbHashBlacklist,
+ configServerData,
+ cleanupOptions);
});
- try {
- // Start background workload threads.
- bgThreadMgr.spawnAll(cluster, executionOptions);
- bgThreadMgr.checkFailed(0);
-
- var schedule = scheduleWorkloads(workloads, executionMode, executionOptions);
- printWorkloadSchedule(schedule, bgWorkloads);
-
- schedule.forEach(function(workloads) {
- // Check if any background workloads have failed.
- if (bgThreadMgr.checkForErrors()) {
- var msg = 'Background workload failed before all foreground workloads ran';
- throw new IterationEnd(msg);
- }
-
- // Make a deep copy of the $config object for each of the workloads that are
- // going to be run to ensure the workload starts with a fresh version of its
- // $config.data. This is necessary because $config.data keeps track of
- // thread-local state that may be updated during a workload's setup(),
- // teardown(), and state functions.
- var groupContext = {};
- workloads.forEach(function(workload) {
- groupContext[workload] = Object.extend({}, context[workload], true);
- });
-
- // Run the next group of workloads in the schedule.
- runWorkloadGroup(threadMgr,
- workloads,
- groupContext,
- cluster,
- clusterOptions,
- executionMode,
- executionOptions,
- errors,
- maxAllowedThreads,
- dbHashBlacklist,
- configServerData,
- cleanupOptions);
- });
- } catch (err) {
- activeException = true;
- throw err;
- } finally {
- // Set a flag so background threads know to terminate.
- bgThreadMgr.markAllForTermination();
- errors.push(...bgThreadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Background ' + e.workloads.join(' '))));
+ if (cluster.isSharded() && errors.length) {
+ jsTest.log('Config Server Data:\n' + tojsononeline(configServerData));
}
- } finally {
- try {
- // Call each background workload's teardown function.
- bgCleanup.forEach(bgWorkload => cleanupWorkload(bgWorkload,
- bgContext,
- cluster,
- errors,
- 'Background',
- dbHashBlacklist,
- cleanupOptions));
- // TODO: Call cleanupWorkloadData() on background workloads here if no background
- // workload teardown functions fail.
-
- // Replace the active exception with an exception describing the errors from all
- // the foreground and background workloads. IterationEnd errors are ignored because
- // they are thrown when the background workloads are instructed by the thread
- // manager to terminate.
- var workloadErrors = errors.filter(e => !e.err.startsWith('IterationEnd:'));
-
- if (cluster.isSharded() && workloadErrors.length) {
- jsTest.log('Config Server Data:\n' + tojsononeline(configServerData));
- }
- throwError(workloadErrors);
- } catch (err) {
- activeException = true;
- throw err;
- } finally {
- // We preserve the data files when an FSM workload failed so that they can later be
- // archived to S3.
- const opts = activeException ? {noCleanData: true} : {};
- cluster.teardown(opts);
- }
+ throwError(errors);
+ } finally {
+ cluster.teardown();
}
}
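The scheduling loop that survives still deep-copies each workload's $config context before a group runs. The third argument to the shell's Object.extend() is what makes that copy deep; a small illustration of why the shallow form would not be safe (the values are illustrative):

    // Illustrative: Object.extend(dst, src, /*deep=*/true) clones nested
    // objects, so state mutated during one schedule group cannot leak into
    // the pristine context used by the next group.
    var context = {myWorkload: {data: {counter: 0}}};

    var shallow = Object.extend({}, context.myWorkload);     // aliases .data
    var deep = Object.extend({}, context.myWorkload, true);  // clones .data

    deep.data.counter++;
    assert.eq(0, context.myWorkload.data.counter);  // deep copy stayed isolated

    shallow.data.counter++;
    assert.eq(1, context.myWorkload.data.counter);  // shallow copy leaked through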
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index bfe4bea5f03..a6e361f1103 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -53,7 +53,7 @@ var ThreadManager = function(clusterOptions, executionMode = {composed: false})
'the maximum allowed threads must be an integer');
function computeNumThreads() {
- // If we don't have any workloads, such as having no background workloads, return 0.
+ // If we don't have any workloads, return 0.
if (workloads.length === 0) {
return 0;
}
@@ -191,19 +191,6 @@ var ThreadManager = function(clusterOptions, executionMode = {composed: false})
return errors;
};
-
- this.markAllForTermination = function markAllForTermination() {
- if (_workloads.length === 0) {
- return;
- }
-
- // Background threads periodically check the 'fsm_background' collection of the
- // 'config' database for a document specifying { terminate: true }. If such a
- // document is found the background thread terminates.
- var coll = _context[_workloads[0]].db.getSiblingDB('config').fsm_background;
- assert.writeOK(coll.update({terminate: true}, {terminate: true}, {upsert: true}));
-
- };
};
/**
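For the record, markAllForTermination was one half of a handshake with the checkForTermination state in background_base.js; both halves leave the tree in this commit. Condensed, the coordination looked like this (rewritten here only to document how the two sides interlocked):

    // Foreground side (was thread_mgr.js): upsert a signal document.
    var coll = db.getSiblingDB('config').fsm_background;
    assert.writeOK(coll.update({terminate: true}, {terminate: true}, {upsert: true}));

    // Background side (was background_base.js): poll for the signal and bail
    // out with IterationEnd, which the runner treated as a clean shutdown
    // rather than a workload failure.
    if (coll.find({terminate: true}).itcount() >= 1) {
        throw new IterationEnd('Background workload was instructed to terminate');
    }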