author     Robert Guo <robert.guo@10gen.com>    2018-04-11 11:05:13 -0400
committer  Robert Guo <robert.guo@10gen.com>    2018-04-30 09:56:57 -0400
commit     39622745cd5258d40924c8e44be73b5c2e1b4ca4 (patch)
tree       cfb986e14083c3736747ced43246e92fa51978b0
parent     0b04f8bab03c64477b6ffd60fcd1c592dd4ca2b1 (diff)
download   mongo-39622745cd5258d40924c8e44be73b5c2e1b4ca4.tar.gz
SERVER-19630 allow FSM tests to connect to an existing cluster
-rw-r--r--  buildscripts/resmokeconfig/suites/concurrency_sharded.yml                                  |  18
-rw-r--r--  buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml               | 147
-rw-r--r--  buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml  | 150
-rw-r--r--  buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml                      | 147
-rw-r--r--  buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml        | 150
-rw-r--r--  buildscripts/resmokelib/core/programs.py                                                    |   2
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset.py                                      | 104
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset_utils.py                                |  34
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shardedcluster.py                                  |  19
-rw-r--r--  buildscripts/resmokelib/testing/testcases/fsm_workload_test.py                              |  17
-rw-r--r--  etc/evergreen.yml                                                                           | 135
-rw-r--r--  jstests/concurrency/fsm_all_sharded_causal_consistency.js                                   |  98
-rw-r--r--  jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js                      | 103
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js                                                     | 166
-rw-r--r--  jstests/concurrency/fsm_libs/fsm.js                                                         |  15
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js                                              |  31
-rw-r--r--  jstests/concurrency/fsm_libs/shard_fixture.js                                               |  10
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js                                               |  16
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_base_partitioned.js                               |  24
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js                        |   8
-rw-r--r--  jstests/noPassthrough/shard_fixture_selftest.js                                             |   6
-rw-r--r--  src/mongo/shell/collection.js                                                               |   4
22 files changed, 984 insertions, 420 deletions
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded.yml
deleted file mode 100644
index 3e07cddca5d..00000000000
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/concurrency/fsm_all_sharded*.js
- exclude_files:
- # Skip causal consistency and continuous stepdown tests because they're run in separate suites
- - jstests/concurrency/fsm_all_sharded_causal_consistency*.js
- - jstests/concurrency/fsm_all_sharded_with_stepdowns*.js
-
-# Concurrency tests that run against a sharded cluster start one themselves.
-executor:
- archive:
- tests: true
- config:
- shell_options:
- nodb: ''
- readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
index 0adbc219c91..884dd9e5899 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
@@ -1,14 +1,153 @@
-test_kind: js_test
+test_kind: fsm_workload_test
selector:
roots:
- - jstests/concurrency/fsm_all_sharded_causal_consistency.js
+ - jstests/concurrency/fsm_workloads/**/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # SERVER-17397 Drops of sharded namespaces may not fully succeed
+ - jstests/concurrency/fsm_workloads/create_database.js
+ - jstests/concurrency/fsm_workloads/drop_database.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # SERVER-30983 background indexes are incompatible with causal consistency
+ - jstests/concurrency/fsm_workloads/reindex_background.js
+
+ # $lookup and $graphLookup are not supported on sharded collections.
+ - jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+ - jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+
+ # Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
+ # namespaces collide across mongos processes'
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+ - jstests/concurrency/fsm_workloads/map_reduce_inline.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+ # the group command cannot be issued against a sharded cluster
+ - jstests/concurrency/fsm_workloads/group.js
+ - jstests/concurrency/fsm_workloads/group_cond.js
+ # eval doesn't work with sharded collections
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+
+ # cannot ensureIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # The rename_* workloads are disabled since renameCollection doesn't work with sharded
+ # collections
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+
+ # our update queries lack shard keys
+ - jstests/concurrency/fsm_workloads/update_upsert_multi.js
+ - jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
-# Concurrency tests that run against a sharded cluster start one themselves.
executor:
archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
tests: true
config:
shell_options:
- nodb: ''
readMode: commands
+ global_vars:
+ TestData:
+ runningWithCausalConsistency: true
+ usingReplicaSetShards: true
+ runningWithAutoSplit: false
+ runningWithBalancer: false
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ # TODO SERVER-30204: Avoid restarting the MongoDB deployment in order to delete all of the data
+ # files from earlier FSM workloads.
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ enable_balancer: false
+ enable_autosplit: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ # TODO SERVER-32572 remove voting_secondaries: true
+ voting_secondaries: true
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
index 4d850a224c5..224a5ed1c1b 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
@@ -1,14 +1,156 @@
-test_kind: js_test
+test_kind: fsm_workload_test
selector:
roots:
- - jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js
+ - jstests/concurrency/fsm_workloads/**/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # SERVER-17397 Drops of sharded namespaces may not fully succeed
+ - jstests/concurrency/fsm_workloads/create_database.js
+ - jstests/concurrency/fsm_workloads/drop_database.js
+
+ # SERVER-14669 Multi-removes that use $where miscount removed documents
+ - jstests/concurrency/fsm_workloads/remove_where.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # SERVER-30983 background indexes are incompatible with causal consistency
+ - jstests/concurrency/fsm_workloads/reindex_background.js
+
+ # $lookup and $graphLookup are not supported on sharded collections.
+ - jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+ - jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+
+ # Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
+ # namespaces collide across mongos processes'
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+ - jstests/concurrency/fsm_workloads/map_reduce_inline.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+
+ # Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
+ # concurrent chunk migration may cause duplicate or missing results'
+ - jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+ # the group command cannot be issued against a sharded cluster
+ - jstests/concurrency/fsm_workloads/group.js
+ - jstests/concurrency/fsm_workloads/group_cond.js
+ # eval doesn't work with sharded collections
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+
+ # cannot ensureIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # The rename_* workloads are disabled since renameCollection doesn't work with sharded
+ # collections
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+
+ # our update queries lack shard keys
+ - jstests/concurrency/fsm_workloads/update_upsert_multi.js
+ - jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
-# Concurrency tests that run against a sharded cluster start one themselves.
executor:
archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
tests: true
config:
shell_options:
- nodb: ''
readMode: commands
+ global_vars:
+ TestData:
+ runningWithCausalConsistency: true
+ usingReplicaSetShards: true
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ # TODO SERVER-30204: Avoid restarting the MongoDB deployment in order to delete all of the data
+ # files from earlier FSM workloads.
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ # TODO SERVER-32572 remove voting_secondaries: true
+ voting_secondaries: true
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
new file mode 100644
index 00000000000..aaa9181a5db
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
@@ -0,0 +1,147 @@
+test_kind: fsm_workload_test
+
+selector:
+ roots:
+ - jstests/concurrency/fsm_workloads/**/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # SERVER-17397 Drops of sharded namespaces may not fully succeed
+ - jstests/concurrency/fsm_workloads/create_database.js
+ - jstests/concurrency/fsm_workloads/drop_database.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # $lookup and $graphLookup are not supported on sharded collections.
+ - jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+ - jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+
+ # Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
+ # namespaces collide across mongos processes'
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+ - jstests/concurrency/fsm_workloads/map_reduce_inline.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+ # the group command cannot be issued against a sharded cluster
+ - jstests/concurrency/fsm_workloads/group.js
+ - jstests/concurrency/fsm_workloads/group_cond.js
+ # eval doesn't work with sharded collections
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+
+ # cannot ensureIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # The rename_* workloads are disabled since renameCollection doesn't work with sharded
+ # collections
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+
+ # our update queries lack shard keys
+ - jstests/concurrency/fsm_workloads/update_upsert_multi.js
+ - jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
+ tests: true
+ config:
+ shell_options:
+ readMode: commands
+ global_vars:
+ TestData:
+ usingReplicaSetShards: true
+ runningWithAutoSplit: false
+ runningWithBalancer: false
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ # TODO SERVER-30204: Avoid restarting the MongoDB deployment in order to delete all of the data
+ # files from earlier FSM workloads.
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ enable_balancer: false
+ enable_autosplit: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
new file mode 100644
index 00000000000..b15d3ab98d0
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
@@ -0,0 +1,150 @@
+test_kind: fsm_workload_test
+
+selector:
+ roots:
+ - jstests/concurrency/fsm_workloads/**/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # SERVER-17397 Drops of sharded namespaces may not fully succeed
+ - jstests/concurrency/fsm_workloads/create_database.js
+ - jstests/concurrency/fsm_workloads/drop_database.js
+
+ # SERVER-14669 Multi-removes that use $where miscount removed documents
+ - jstests/concurrency/fsm_workloads/remove_where.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # $lookup and $graphLookup are not supported on sharded collections.
+ - jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+ - jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+
+ # Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
+ # namespaces collide across mongos processes'
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+ - jstests/concurrency/fsm_workloads/map_reduce_inline.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge.js
+ - jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+ - jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+ - jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+
+ # Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
+ # concurrent chunk migration may cause duplicate or missing results'
+ - jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+ # the group command cannot be issued against a sharded cluster
+ - jstests/concurrency/fsm_workloads/group.js
+ - jstests/concurrency/fsm_workloads/group_cond.js
+ # eval doesn't work with sharded collections
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+ - jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+ - jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval.js
+ - jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+
+ # cannot ensureIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # The rename_* workloads are disabled since renameCollection doesn't work with sharded
+ # collections
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+ - jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+ - jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+
+ # our update queries lack shard keys
+ - jstests/concurrency/fsm_workloads/update_upsert_multi.js
+ - jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
+ tests: true
+ config:
+ shell_options:
+ readMode: commands
+ global_vars:
+ TestData:
+ usingReplicaSetShards: true
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ # TODO SERVER-30204: Avoid restarting the MongoDB deployment in order to delete all of the data
+ # files from earlier FSM workloads.
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index 7b8693e2dfb..ebce06fb04d 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -40,7 +40,7 @@ def mongod_program( # pylint: disable=too-many-branches
# complete first. It defaults to 900, or 15 minutes, which is prohibitively long for tests.
# Setting it in the .yml file overrides this.
if "shardsvr" in kwargs and "orphanCleanupDelaySecs" not in suite_set_parameters:
- suite_set_parameters["orphanCleanupDelaySecs"] = 0
+ suite_set_parameters["orphanCleanupDelaySecs"] = 1
# The LogicalSessionCache does automatic background refreshes in the server. This is
# race-y for tests, since tests trigger their own immediate refreshes instead. Turn off
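The change above only takes effect when the process is started as a shard server and the suite's .yml file has not already chosen a value for orphanCleanupDelaySecs. A minimal standalone sketch of that "test-friendly default unless the suite overrides it" pattern, with a hypothetical function name:

```python
# Minimal sketch (hypothetical function name) of the conditional-default pattern
# used above for orphanCleanupDelaySecs.
def apply_orphan_cleanup_default(suite_set_parameters, is_shardsvr):
    """Lower orphanCleanupDelaySecs for tests unless the suite .yml already set it."""
    if is_shardsvr and "orphanCleanupDelaySecs" not in suite_set_parameters:
        suite_set_parameters["orphanCleanupDelaySecs"] = 1
    return suite_set_parameters


print(apply_orphan_cleanup_default({}, is_shardsvr=True))
# {'orphanCleanupDelaySecs': 1}
print(apply_orphan_cleanup_default({"orphanCleanupDelaySecs": 600}, is_shardsvr=True))
# {'orphanCleanupDelaySecs': 600} -- the .yml value wins
```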
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 43bcc854b67..18bb323e9e1 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -10,6 +10,7 @@ import pymongo.errors
import pymongo.write_concern
from . import interface
+from . import replicaset_utils
from . import standalone
from ... import config
from ... import errors
@@ -116,11 +117,7 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
repl_config = {"_id": self.replset_name, "protocolVersion": 1}
client = self.nodes[0].mongo_client()
- if self.auth_options is not None:
- auth_db = client[self.auth_options["authenticationDatabase"]]
- auth_db.authenticate(self.auth_options["username"],
- password=self.auth_options["password"],
- mechanism=self.auth_options["authenticationMechanism"])
+ self.auth(client, self.auth_options)
if client.local.system.replset.count():
# Skip initializing the replset if there is an existing configuration.
@@ -193,6 +190,26 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
raise errors.ServerFailure(msg)
time.sleep(5) # Wait a little bit before trying again.
+ def await_last_op_committed(self):
+ """Wait for the last majority committed op to be visible."""
+ primary_client = self.get_primary().mongo_client()
+ self.auth(primary_client, self.auth_options)
+
+ primary_optime = replicaset_utils.get_last_optime(primary_client)
+ up_to_date_nodes = set()
+
+ def check_rcmaj_optime(client, node):
+ """Return True if all nodes have caught up with the primary."""
+ res = client.admin.command({"replSetGetStatus": 1})
+ read_concern_majority_optime = res["optimes"]["readConcernMajorityOpTime"]
+
+ if read_concern_majority_optime >= primary_optime:
+ up_to_date_nodes.add(node.port)
+
+ return len(up_to_date_nodes) == len(self.nodes)
+
+ self._await_cmd_all_nodes(check_rcmaj_optime, "waiting for last committed optime")
+
def await_ready(self):
"""Wait for replica set tpo be ready."""
self._await_primary()
@@ -232,16 +249,22 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
time.sleep(0.1) # Wait a little bit before trying again.
self.logger.info("Secondary on port %d is now available.", secondary.port)
+ @staticmethod
+ def auth(client, auth_options=None):
+ """Auth a client connection."""
+ if auth_options is not None:
+ auth_db = client[auth_options["authenticationDatabase"]]
+ auth_db.authenticate(auth_options["username"], password=auth_options["password"],
+ mechanism=auth_options["authenticationMechanism"])
+
+ return client
+
def _await_stable_checkpoint(self):
# Since this method is called at startup we expect the first node to be primary even when
# self.all_nodes_electable is True.
- primary = self.nodes[0]
- primary_client = primary.mongo_client()
- if self.auth_options is not None:
- auth_db = primary_client[self.auth_options["authenticationDatabase"]]
- auth_db.authenticate(self.auth_options["username"],
- password=self.auth_options["password"],
- mechanism=self.auth_options["authenticationMechanism"])
+ primary_client = self.nodes[0].mongo_client()
+ self.auth(primary_client, self.auth_options)
+
# Algorithm precondition: All nodes must be in primary/secondary state.
#
# 1) Perform a majority write. This will guarantee the primary updates its commit point
@@ -261,12 +284,9 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
for node in self.nodes:
self.logger.info("Waiting for node on port %d to have a stable checkpoint.", node.port)
client = node.mongo_client(read_preference=pymongo.ReadPreference.SECONDARY)
+ self.auth(client, self.auth_options)
+
client_admin = client["admin"]
- if self.auth_options is not None:
- client_auth_db = client[self.auth_options["authenticationDatabase"]]
- client_auth_db.authenticate(self.auth_options["username"],
- password=self.auth_options["password"],
- mechanism=self.auth_options["authenticationMechanism"])
while True:
status = client_admin.command("replSetGetStatus")
@@ -328,37 +348,49 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
# of the replica set are configured with priority=0.
return self.nodes[0]
+ def is_primary(client, node):
+ """Return if `node` is master."""
+ is_master = client.admin.command("isMaster")["ismaster"]
+ if is_master:
+ self.logger.info("The node on port %d is primary of replica set '%s'", node.port,
+ self.replset_name)
+ return True
+ return False
+
+ return self._await_cmd_all_nodes(is_primary, "waiting for a primary", timeout_secs)
+
+ def _await_cmd_all_nodes(self, fn, msg, timeout_secs=30):
+ """Run `fn` on all nodes until it returns a truthy value.
+
+ Return the node for which `fn` becomes truthy.
+
+ Two arguments are passed to fn: the client for a node and
+ the MongoDFixture corresponding to that node.
+ """
+
start = time.time()
clients = {}
while True:
for node in self.nodes:
- self._check_get_primary_timeout(start, timeout_secs)
+ now = time.time()
+ if (now - start) >= timeout_secs:
+ msg = "Timed out while {} for replica set '{}'.".format(msg, self.replset_name)
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
try:
- client = clients.get(node.port)
- if not client:
- client = node.mongo_client()
- clients[node.port] = client
- is_master = client.admin.command("isMaster")["ismaster"]
+ if node.port not in clients:
+ clients[node.port] = self.auth(node.mongo_client(), self.auth_options)
+
+ if fn(clients[node.port], node):
+ return node
+
except pymongo.errors.AutoReconnect:
# AutoReconnect exceptions may occur if the primary stepped down since PyMongo
# last contacted it. We'll just try contacting the node again in the next round
# of isMaster requests.
continue
- if is_master:
- self.logger.info("The node on port %d is primary of replica set '%s'",
- node.port, self.replset_name)
- return node
-
- def _check_get_primary_timeout(self, start, timeout_secs):
- now = time.time()
- if (now - start) >= timeout_secs:
- msg = "Timed out while waiting for a primary for replica set '{}'.".format(
- self.replset_name)
- self.logger.error(msg)
- raise errors.ServerFailure(msg)
-
def get_secondaries(self):
"""Return a list of secondaries from the replica set."""
primary = self.get_primary()
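The refactored `_await_cmd_all_nodes()` generalizes the old primary-discovery loop: it repeatedly calls a predicate with a cached, authenticated client and the corresponding node until the predicate returns something truthy or the timeout expires, tolerating AutoReconnect errors along the way. A simplified standalone sketch of that polling loop, assuming mongod processes on hypothetical localhost ports and omitting authentication:

```python
import time

import pymongo
import pymongo.errors


def await_cmd_all_nodes(ports, fn, msg, timeout_secs=30):
    """Poll fn(client, port) for every node until it returns a truthy value."""
    start = time.time()
    clients = {}
    while True:
        for port in ports:
            if time.time() - start >= timeout_secs:
                raise RuntimeError("Timed out while {}.".format(msg))
            try:
                # Reuse one client per node across polling rounds.
                if port not in clients:
                    clients[port] = pymongo.MongoClient("localhost", port)
                if fn(clients[port], port):
                    return port
            except pymongo.errors.AutoReconnect:
                # The node may have restarted or stepped down; retry on the next pass.
                continue


# Example predicate mirroring is_primary() above (requires running mongods):
# await_cmd_all_nodes([20000, 20001, 20002],
#                     lambda client, port: client.admin.command("isMaster")["ismaster"],
#                     "waiting for a primary")
```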
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset_utils.py b/buildscripts/resmokelib/testing/fixtures/replicaset_utils.py
new file mode 100644
index 00000000000..33944dace67
--- /dev/null
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset_utils.py
@@ -0,0 +1,34 @@
+"""
+Set of utility helper functions to get information about a replica set.
+
+These helpers can be used for any replica set, not only ones started by
+resmoke.py.
+"""
+
+import bson
+
+from buildscripts.resmokelib import errors
+
+
+def get_last_optime(client):
+ """Get the latest optime.
+
+ This function is derived from _getLastOpTime() in ReplSetTest.
+ """
+ repl_set_status = client.admin.command({"replSetGetStatus": 1})
+ conn_status = [m for m in repl_set_status["members"] if "self" in m][0]
+ optime = conn_status["optime"]
+
+ optime_is_empty = False
+
+ if isinstance(optime, bson.Timestamp): # PV0
+ optime_is_empty = (optime == bson.Timestamp(0, 0))
+ else: # PV1
+ optime_is_empty = (optime["ts"] == bson.Timestamp(0, 0) and optime["t"] == -1)
+
+ if optime_is_empty:
+ raise errors.ServerFailure(
+ "Uninitialized opTime being reported by {addr[0]}:{addr[1]}: {repl_set_status}".format(
+ addr=client.address, repl_set_status=repl_set_status))
+
+ return optime
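A short usage sketch for the new helper; the port is hypothetical and the call requires a running replica-set member:

```python
import pymongo

from buildscripts.resmokelib.testing.fixtures.replicaset_utils import get_last_optime

# Connect to any member of a running replica set (hypothetical port).
client = pymongo.MongoClient("localhost", 20000)

optime = get_last_optime(client)
# Under protocol version 1 this is a subdocument such as {"ts": Timestamp(...), "t": 3};
# under protocol version 0 it is a bare Timestamp. An uninitialized optime raises
# errors.ServerFailure instead of being returned.
print(optime)
```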
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 06c36565b9d..b9fe63e623e 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -28,7 +28,8 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
self, logger, job_num, mongos_executable=None, mongos_options=None,
mongod_executable=None, mongod_options=None, dbpath_prefix=None, preserve_dbpath=False,
num_shards=1, num_rs_nodes_per_shard=None, num_mongos=1, enable_sharding=None,
- enable_balancer=True, auth_options=None, configsvr_options=None, shard_options=None):
+ enable_balancer=True, enable_autosplit=True, auth_options=None, configsvr_options=None,
+ shard_options=None):
"""Initialize ShardedClusterFixture with different options for the cluster processes."""
interface.Fixture.__init__(self, logger, job_num, dbpath_prefix=dbpath_prefix)
@@ -46,6 +47,7 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
self.num_mongos = num_mongos
self.enable_sharding = utils.default_if_none(enable_sharding, [])
self.enable_balancer = enable_balancer
+ self.enable_autosplit = enable_autosplit
self.auth_options = auth_options
self.configsvr_options = utils.default_if_none(configsvr_options, {})
self.shard_options = utils.default_if_none(shard_options, {})
@@ -114,10 +116,23 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
if not self.enable_balancer:
client.admin.command({"balancerStop": 1})
+ # Turn off autosplit if it is not meant to be enabled.
+ if not self.enable_autosplit:
+ wc = pymongo.WriteConcern(w="majority", wtimeout=30000)
+ coll = client.config.get_collection("settings", write_concern=wc)
+ coll.update_one({"_id": "autosplit"}, {"$set": {"enabled": False}}, upsert=True)
+
# Inform mongos about each of the shards
for shard in self.shards:
self._add_shard(client, shard)
+ # Ensure that all CSRS nodes are up to date. This is strictly needed for tests that use
+ # multiple mongoses. In those cases, the first mongos initializes the contents of the config
+ # database, but without waiting for those writes to replicate to all the config servers then
+ # the secondary mongoses risk reading from a stale config server and seeing an empty config
+ # database.
+ self.configsvr.await_last_op_committed()
+
# Enable sharding on each of the specified databases
for db_name in self.enable_sharding:
self.logger.info("Enabling sharding for '%s' database...", db_name)
@@ -155,7 +170,7 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
raise errors.ServerFailure(teardown_handler.get_error_message())
def is_running(self):
- """Return true if the all nodes in the cluster are all still operating."""
+ """Return true if all nodes in the cluster are all still operating."""
return (self.configsvr is not None and self.configsvr.is_running()
and all(shard.is_running() for shard in self.shards)
and all(mongos.is_running() for mongos in self.mongos))
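Outside of resmoke.py, the same autosplit toggle can be performed with a plain pymongo connection to a mongos. A minimal sketch, assuming a mongos listening on a hypothetical local port:

```python
import pymongo

# Connect to a mongos of a running sharded cluster (hypothetical port).
client = pymongo.MongoClient("localhost", 20006)

# Write with majority write concern so all config servers agree before tests start.
wc = pymongo.WriteConcern(w="majority", wtimeout=30000)
settings = client.config.get_collection("settings", write_concern=wc)

# Upsert {_id: "autosplit"} so chunks are not split automatically during the workload.
settings.update_one({"_id": "autosplit"}, {"$set": {"enabled": False}}, upsert=True)
```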
diff --git a/buildscripts/resmokelib/testing/testcases/fsm_workload_test.py b/buildscripts/resmokelib/testing/testcases/fsm_workload_test.py
index 91ecd4de13c..908c9ffac93 100644
--- a/buildscripts/resmokelib/testing/testcases/fsm_workload_test.py
+++ b/buildscripts/resmokelib/testing/testcases/fsm_workload_test.py
@@ -2,6 +2,9 @@
from __future__ import absolute_import
+import os.path
+import threading
+
from buildscripts.resmokelib.testing.testcases import jsrunnerfile
@@ -10,6 +13,9 @@ class FSMWorkloadTestCase(jsrunnerfile.JSRunnerFileTestCase):
REGISTERED_NAME = "fsm_workload_test"
+ _COUNTER_LOCK = threading.Lock()
+ _COUNTER = 0
+
def __init__(self, logger, fsm_workload, shell_executable=None, shell_options=None):
"""Initialize the FSMWorkloadTestCase with the FSM workload file."""
@@ -24,4 +30,15 @@ class FSMWorkloadTestCase(jsrunnerfile.JSRunnerFileTestCase):
return self.test_name
def _populate_test_data(self, test_data):
+
test_data["fsmWorkloads"] = self.fsm_workload
+
+ with FSMWorkloadTestCase._COUNTER_LOCK:
+ count = FSMWorkloadTestCase._COUNTER
+ FSMWorkloadTestCase._COUNTER += 1
+
+ # We use a global incrementing counter as a prefix for the database name to avoid any
+ # collection lifecycle related issues in sharded clusters. This more closely matches how
+ # uniqueDBName() and uniqueCollName() would have returned distinct values when called once
+ # for each FSM workload in the entire schedule by runner.js.
+ test_data["dbNamePrefix"] = "test{:d}_".format(count)
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index c4821900763..e65a88a99f1 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -4226,14 +4226,20 @@ tasks:
resmoke_args: --suites=concurrency_replication --storageEngine=wiredTiger
- <<: *task_template
- name: concurrency_sharded
- exec_timeout_secs: 43200 # 12 hour timeout for the task overall
+ name: concurrency_sharded_replication
commands:
- func: "do setup"
- func: "run tests"
- timeout_secs: 21600 # 6 hour timeout for each test
vars:
- resmoke_args: --suites=concurrency_sharded --storageEngine=wiredTiger
+ resmoke_args: --suites=concurrency_sharded_replication --storageEngine=wiredTiger
+
+- <<: *task_template
+ name: concurrency_sharded_replication_with_balancer
+ commands:
+ - func: "do setup"
+ - func: "run tests"
+ vars:
+ resmoke_args: --suites=concurrency_sharded_replication_with_balancer --storageEngine=wiredTiger
- <<: *task_template
name: concurrency_sharded_causal_consistency
@@ -6570,7 +6576,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: read_concern_linearizable_passthrough
- name: read_concern_majority_passthrough
@@ -6622,7 +6629,8 @@ buildvariants:
- name: parallel
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- &linux-64-debug-template
@@ -6720,7 +6728,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -6969,7 +6978,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -7082,7 +7092,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -7170,7 +7181,10 @@ buildvariants:
- name: concurrency_replication
distros:
- ubuntu1604-arm64-small
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ distros:
+ - ubuntu1604-arm64-small
+ - name: concurrency_sharded_replication_with_balancer
distros:
- ubuntu1604-arm64-small
- name: concurrency_simultaneous
@@ -7290,7 +7304,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -7351,7 +7366,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: ese
@@ -7552,7 +7568,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -7724,7 +7741,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -7826,7 +7844,10 @@ buildvariants:
- name: concurrency_replication
distros:
- windows-64-vs2015-large
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ distros:
+ - windows-64-vs2015-large
+ - name: concurrency_sharded_replication_with_balancer
distros:
- windows-64-vs2015-large
- name: concurrency_sharded_causal_consistency
@@ -8390,7 +8411,8 @@ buildvariants:
distros:
- windows-64-vs2015-large # Some workloads require a lot of memory, use a bigger machine for this suite.
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: jsCore
- name: jsCore_auth
@@ -8458,7 +8480,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: disk_wiredtiger
@@ -8611,7 +8634,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -9236,7 +9260,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -9771,7 +9796,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -9995,7 +10021,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -10084,7 +10111,10 @@ buildvariants:
- name: concurrency_replication
distros:
- rhel70
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ distros:
+ - rhel70
+ - name: concurrency_sharded_replication_with_balancer
distros:
- rhel70
- name: concurrency_simultaneous
@@ -10153,7 +10183,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: ese
@@ -10259,7 +10290,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: ese
@@ -10362,7 +10394,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: ese
@@ -10681,7 +10714,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: ese
@@ -10809,7 +10843,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -11007,7 +11042,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -11094,7 +11130,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -11240,7 +11277,8 @@ buildvariants:
- name: parallel_compatibility
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: replica_sets
- name: replica_sets_auth
@@ -11316,7 +11354,8 @@ buildvariants:
distros:
- rhel62-large # Some workloads require a lot of memory, use a bigger machine for this suite.
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -11640,7 +11679,8 @@ buildvariants:
distros:
- rhel62-large # Some workloads require a lot of memory, use a bigger machine for this suite.
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: failpoints
@@ -11723,7 +11763,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: failpoints
@@ -11805,7 +11846,10 @@ buildvariants:
- name: concurrency_replication
distros:
- rhel72-zseries-build
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ distros:
+ - rhel72-zseries-build
+ - name: concurrency_sharded_replication_with_balancer
distros:
- rhel72-zseries-build
- name: concurrency_simultaneous
@@ -11942,7 +11986,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -12114,7 +12159,8 @@ buildvariants:
- name: change_streams_whole_cluster_sharded_collections_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_sharded_with_stepdowns
@@ -12257,7 +12303,8 @@ buildvariants:
- name: sharded_causally_consistent_jscore_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: disk_wiredtiger
@@ -12392,7 +12439,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_simultaneous
@@ -12511,7 +12559,10 @@ buildvariants:
- name: concurrency_replication
distros:
- windows-64-vs2015-large
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ distros:
+ - windows-64-vs2015-large
+ - name: concurrency_sharded_replication_with_balancer
distros:
- windows-64-vs2015-large
- name: concurrency_sharded_causal_consistency
@@ -12619,7 +12670,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_sharded_causal_consistency
- name: concurrency_sharded_causal_consistency_and_balancer
- name: concurrency_simultaneous
@@ -12705,7 +12757,8 @@ buildvariants:
- name: bulk_gle_passthrough
- name: concurrency
- name: concurrency_replication
- - name: concurrency_sharded
+ - name: concurrency_sharded_replication
+ - name: concurrency_sharded_replication_with_balancer
- name: concurrency_simultaneous
- name: dbtest
- name: disk_mmapv1
diff --git a/jstests/concurrency/fsm_all_sharded_causal_consistency.js b/jstests/concurrency/fsm_all_sharded_causal_consistency.js
deleted file mode 100644
index 6bd389cda22..00000000000
--- a/jstests/concurrency/fsm_all_sharded_causal_consistency.js
+++ /dev/null
@@ -1,98 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- 'reindex_background.js' // TODO SERVER-30983
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {sharded: {enabled: true}, replication: {enabled: true}},
- {sessionOptions: {causalConsistency: true, readPreference: {mode: "secondary"}}});
diff --git a/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js b/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js
deleted file mode 100644
index c192723aa18..00000000000
--- a/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js
+++ /dev/null
@@ -1,103 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
- // concurrent chunk migration may cause duplicate or missing results'
- 'yield_geo_near_dedup.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- 'reindex_background.js' // TODO SERVER-30983
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {sharded: {enabled: true, enableBalancer: true}, replication: {enabled: true}},
- {sessionOptions: {causalConsistency: true, readPreference: {mode: "secondary"}}});
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 5033e867d63..f925d3ac51f 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -3,7 +3,8 @@
/**
* Represents a MongoDB cluster.
*/
-load('jstests/hooks/validate_collections.js'); // Loads the validateCollections function.
+load('jstests/hooks/validate_collections.js'); // For validateCollections.
+load('jstests/concurrency/fsm_libs/shard_fixture.js'); // For FSMShardingTest.
var Cluster = function(options) {
if (!(this instanceof Cluster)) {
@@ -188,6 +189,7 @@ var Cluster = function(options) {
var conn;
var st;
+ var rawST; // The raw ShardingTest object for test suites not using resmoke fixtures.
var initialized = false;
var clusterStartTime;
@@ -209,49 +211,46 @@ var Cluster = function(options) {
if (options.sharded.enabled) {
if (options.useExistingConnectionAsSeed) {
- // Note that depending on how SERVER-21485 is implemented, it may still not be
- // possible to rehydrate a ShardingTest instance from an existing connection because
- // it wouldn't be possible to discover other mongos processes running in the sharded
- // cluster.
- throw new Error(
- "Cluster cannot support 'useExistingConnectionAsSeed' option until" +
- ' SERVER-21485 is implemented');
- }
+ st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
+ } else {
+ // TODO: allow 'options' to specify the number of shards and mongos processes
+ var shardConfig = {
+ shards: options.sharded.numShards,
+ mongos: options.sharded.numMongos,
+ verbose: verbosityLevel,
+ other: {
+ enableAutoSplit: options.sharded.enableAutoSplit,
+ enableBalancer: options.sharded.enableBalancer,
+ }
+ };
- // TODO: allow 'options' to specify the number of shards and mongos processes
- var shardConfig = {
- shards: options.sharded.numShards,
- mongos: options.sharded.numMongos,
- verbose: verbosityLevel,
- other: {
- enableAutoSplit: options.sharded.enableAutoSplit,
- enableBalancer: options.sharded.enableBalancer,
+ // TODO: allow 'options' to specify an 'rs' config
+ if (options.replication.enabled) {
+ shardConfig.rs = {
+ nodes: makeReplSetTestConfig(options.replication.numNodes,
+ !this.shouldPerformContinuousStepdowns()),
+ // Increase the oplog size (in MB) to prevent rollover
+ // during write-heavy workloads
+ oplogSize: 1024,
+ // Set the electionTimeoutMillis to 1 day to prevent unintended elections
+ settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
+ verbose: verbosityLevel
+ };
+ shardConfig.rsOptions = {};
}
- };
- // TODO: allow 'options' to specify an 'rs' config
- if (options.replication.enabled) {
- shardConfig.rs = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover
- // during write-heavy workloads
- oplogSize: 1024,
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
- verbose: verbosityLevel
- };
- shardConfig.rsOptions = {};
- }
+ if (this.shouldPerformContinuousStepdowns()) {
+ load('jstests/libs/override_methods/continuous_stepdown.js');
+ ContinuousStepdown.configure(options.sharded.stepdownOptions);
+ }
- if (this.shouldPerformContinuousStepdowns()) {
- load('jstests/libs/override_methods/continuous_stepdown.js');
- ContinuousStepdown.configure(options.sharded.stepdownOptions);
- }
+ rawST = new ShardingTest(shardConfig);
+ const hostStr = "mongodb://" + rawST._mongos.map(conn => conn.host).join(",");
- st = new ShardingTest(shardConfig);
+ st = new FSMShardingTest(hostStr);
+ }
- conn = st.s; // mongos
+ conn = st.s(0); // First mongos
this.teardown = function teardown(opts) {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -264,45 +263,50 @@ var Cluster = function(options) {
if (this.shouldPerformContinuousStepdowns()) {
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
}
- st.stop(opts);
+
+ if (!options.useExistingConnectionAsSeed) {
+ rawST.stop(opts);
+ }
};
if (this.shouldPerformContinuousStepdowns()) {
this.startContinuousFailover = function() {
- st.startContinuousFailover();
+ rawST.startContinuousFailover();
};
this.stopContinuousFailover = function() {
- st.stopContinuousFailover({waitForPrimary: true, waitForMongosRetarget: true});
+ rawST.stopContinuousFailover(
+ {waitForPrimary: true, waitForMongosRetarget: true});
+
+ // Call getPrimary() to re-establish the connections in FSMShardingTest
+ // as it is not a transparent proxy for ShardingTest/rawST.
+ st._configsvr.getPrimary();
+ for (let rst of st._shard_rsts) {
+ rst.getPrimary();
+ }
};
}
- // Save all mongos and mongod connections
- var i = 0;
- var mongos = st.s0;
- var mongod = st.d0;
- while (mongos) {
- _conns.mongos.push(mongos);
- ++i;
- mongos = st['s' + i];
+ // Save all mongos, mongod, and ReplSet connections (if any).
+ var i;
+
+ i = 0;
+ while (st.s(i)) {
+ _conns.mongos.push(st.s(i++));
}
- if (options.replication) {
- var rsTest = st.rs0;
-
- i = 0;
- while (rsTest) {
- this._addReplicaSetConns(rsTest);
- replSets.push(rsTest);
- ++i;
- rsTest = st['rs' + i];
- }
+
+ i = 0;
+ while (st.d(i)) {
+ _conns.mongod.push(st.d(i++));
}
+
i = 0;
- while (mongod) {
- _conns.mongod.push(mongod);
- ++i;
- mongod = st['d' + i];
+ while (st.rs(i)) {
+ var rs = st.rs(i++);
+ this._addReplicaSetConns(rs);
+ replSets.push(rs);
}
+
} else if (options.replication.enabled) {
var replSetConfig = {
nodes: makeReplSetTestConfig(options.replication.numNodes,
@@ -390,12 +394,12 @@ var Cluster = function(options) {
}
var configs = [];
- var config = st.c0;
+ var config = st.c(0);
var i = 0;
while (config) {
configs.push(config);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
configs.forEach(function(conn) {
@@ -520,52 +524,40 @@ var Cluster = function(options) {
var cluster = {mongos: [], config: [], shards: {}};
var i = 0;
- var mongos = st.s0;
+ var mongos = st.s(0);
while (mongos) {
cluster.mongos.push(mongos.name);
++i;
- mongos = st['s' + i];
+ mongos = st.s(i);
}
i = 0;
- var config = st.c0;
+ var config = st.c(0);
while (config) {
cluster.config.push(config.name);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
i = 0;
- var shard = st.shard0;
+ var shard = st.shard(0);
while (shard) {
if (shard.name.includes('/')) {
- // If the shard is a replica set, the format of st.shard0.name in ShardingTest is
+ // If the shard is a replica set, the format of st.shard(0).name in ShardingTest is
// "test-rs0/localhost:20006,localhost:20007,localhost:20008".
var [setName, shards] = shard.name.split('/');
cluster.shards[setName] = shards.split(',');
} else {
- // If the shard is a standalone mongod, the format of st.shard0.name in ShardingTest
- // is "localhost:20006".
+ // If the shard is a standalone mongod, the format of st.shard(0).name in
+ // ShardingTest is "localhost:20006".
cluster.shards[shard.shardName] = [shard.name];
}
++i;
- shard = st['shard' + i];
+ shard = st.shard(i);
}
return cluster;
};
- this.startBalancer = function startBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.startBalancer();
- };
-
- this.stopBalancer = function stopBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.stopBalancer();
- };
-
this.isBalancerEnabled = function isBalancerEnabled() {
return this.isSharded() && options.sharded.enableBalancer;
};
@@ -696,7 +688,7 @@ var Cluster = function(options) {
if (this.isSharded()) {
// Get the storage engine the sharded cluster is configured to use from one of the
// shards since mongos won't report it.
- adminDB = st.shard0.getDB('admin');
+ adminDB = st.shard(0).getDB('admin');
}
var res = adminDB.runCommand({getCmdLineOpts: 1});
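
The cluster.js hunks above replace property-style lookups (st.s0, st['s' + i], st.shard0, ...) with the accessor methods of FSMShardingTest, which return undefined once the index runs past the last member. A minimal sketch of that iteration idiom, assuming an FSMShardingTest instance named st as in the hunks:

    // Collect every mongos, standalone shard, and replica-set shard; each
    // accessor returns undefined past the end, which terminates the loop.
    var mongos = [];
    for (var i = 0; st.s(i); ++i) {
        mongos.push(st.s(i));
    }
    var standaloneShards = [];
    for (i = 0; st.d(i); ++i) {
        standaloneShards.push(st.d(i));  // only populated for non-replset shards
    }
    var replSetShards = [];
    for (i = 0; st.rs(i); ++i) {
        replSetShards.push(st.rs(i));
    }
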
diff --git a/jstests/concurrency/fsm_libs/fsm.js b/jstests/concurrency/fsm_libs/fsm.js
index e7a3eafb946..0c395bc6c19 100644
--- a/jstests/concurrency/fsm_libs/fsm.js
+++ b/jstests/concurrency/fsm_libs/fsm.js
@@ -21,14 +21,23 @@ var fsm = (function() {
// See fsm_libs/cluster.js for the format of args.cluster.
var connCache;
if (args.passConnectionCache) {
+ // In order to ensure that all operations performed by a worker thread happen on the
+ // same session, we override the "_defaultSession" property of the connections in the
+ // cache to be the same as the session underlying 'args.db'.
+ const makeNewConnWithExistingSession = function(connStr) {
+ const conn = new Mongo(connStr);
+ conn._defaultSession = new _DelegatingDriverSession(conn, args.db.getSession());
+ return conn;
+ };
+
connCache = {mongos: [], config: [], shards: {}};
- connCache.mongos = args.cluster.mongos.map(connStr => new Mongo(connStr));
- connCache.config = args.cluster.config.map(connStr => new Mongo(connStr));
+ connCache.mongos = args.cluster.mongos.map(makeNewConnWithExistingSession);
+ connCache.config = args.cluster.config.map(makeNewConnWithExistingSession);
var shardNames = Object.keys(args.cluster.shards);
shardNames.forEach(name => (connCache.shards[name] = args.cluster.shards[name].map(
- connStr => new Mongo(connStr))));
+ makeNewConnWithExistingSession)));
}
for (var i = 0; i < args.iterations; ++i) {
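
The fsm.js change routes every cached connection through the worker's existing session. A rough sketch of the effect inside a workload state function (the function name and shard lookup are illustrative, not part of the change):

    // A write through 'db' and a follow-up read through a cached connection now
    // share one logical session, so per-worker causal bookkeeping (clusterTime,
    // operationTime) stays coherent across connections.
    function exampleState(db, collName, connCache) {
        assert.writeOK(db[collName].insert({tid: this.tid}));
        const shardName = Object.keys(connCache.shards)[0];
        const directConn = connCache.shards[shardName][0];
        directConn.getDB(db.getName())[collName].findOne({tid: this.tid});
    }
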
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index d94fd4e31cc..3187a16bc05 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -174,7 +174,19 @@
clusterOptions.replication.enabled = true;
clusterOptions.replication.numNodes = topology.nodes.length;
} else if (topology.type === Topology.kShardedCluster) {
- throw new Error("resmoke_runner.js doesn't currently support sharded clusters");
+ clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
+ clusterOptions.sharded.enabled = true;
+ clusterOptions.sharded.enableAutoSplit =
+ TestData.hasOwnProperty('runningWithAutoSplit') ? TestData.runningWithAutoSplit : true;
+ clusterOptions.sharded.enableBalancer =
+ TestData.hasOwnProperty('runningWithBalancer') ? TestData.runningWithBalancer : true;
+ clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
+ clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
+ clusterOptions.sharded.stepdownOptions = {};
+ clusterOptions.sharded.stepdownOptions.configStepdown =
+ TestData.runningWithConfigStepdowns || false;
+ clusterOptions.sharded.stepdownOptions.shardStepdown =
+ TestData.runningWithShardStepdowns || false;
} else if (topology.type !== Topology.kStandalone) {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
@@ -184,5 +196,20 @@
workloads = [workloads];
}
- runWorkloads(workloads, {cluster: clusterOptions});
+ let sessionOptions = {};
+ if (TestData.runningWithCausalConsistency) {
+ sessionOptions = Object.assign(
+ sessionOptions, {causalConsistency: true, readPreference: {mode: 'secondary'}});
+ }
+ if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
+ sessionOptions = Object.assign(sessionOptions, {retryWrites: true});
+ }
+
+ const executionOptions = {dbNamePrefix: TestData.dbNamePrefix || ""};
+
+ if (Object.keys(sessionOptions).length > 0) {
+ executionOptions.sessionOptions = sessionOptions;
+ }
+
+ runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
})();
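
resmoke_runner.js now derives the cluster options from the discovered topology and the session options from TestData flags. A sketch of the flags it consults, with example values only (not taken from any particular suite):

    // Example-only values; the runner reads whichever of these the suite sets.
    TestData.usingReplicaSetShards = true;          // shards are replica sets
    TestData.runningWithAutoSplit = false;          // suite disables auto-splitting
    TestData.runningWithBalancer = false;           // suite disables the balancer
    TestData.runningWithCausalConsistency = true;   // adds causalConsistency + secondary reads
    TestData.runningWithShardStepdowns = true;      // either stepdown flag adds retryWrites: true
    TestData.dbNamePrefix = "";                     // prefixed onto workload database names
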
diff --git a/jstests/concurrency/fsm_libs/shard_fixture.js b/jstests/concurrency/fsm_libs/shard_fixture.js
index 807de6d5e52..fb09789dbe1 100644
--- a/jstests/concurrency/fsm_libs/shard_fixture.js
+++ b/jstests/concurrency/fsm_libs/shard_fixture.js
@@ -1,6 +1,6 @@
load('jstests/libs/discover_topology.js');
-class FSMShardingTest {
+var FSMShardingTest = class {
constructor(connStr) {
/**
* `topology` has the following format:
@@ -81,7 +81,11 @@ class FSMShardingTest {
}
d(n = 0) {
- return this.shard(n);
+ // Only return for non-replset shards.
+ if (this._shard_rsts[n] === undefined) {
+ return this._shard_connections[n];
+ }
+ return undefined;
}
/**
@@ -122,4 +126,4 @@ class FSMShardingTest {
/*
* Internal Functions.
*/
-}
+};
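
FSMShardingTest is now assigned to a var (presumably so the file can be load()ed into an existing scope without a redeclaration error), and d(n) only answers for standalone shards. A minimal usage sketch, mirroring what cluster.js does when useExistingConnectionAsSeed is set:

    load('jstests/concurrency/fsm_libs/shard_fixture.js');  // For FSMShardingTest.

    // Wrap an already-running sharded cluster by seeding it with a mongos host.
    var st = new FSMShardingTest('mongodb://' + db.getMongo().host);
    print('first mongos: ' + st.s(0).host);
    if (st.rs(0)) {
        print('shard 0 is the replica set ' + st.rs(0).getURL());
    } else {
        print('shard 0 is the standalone ' + st.d(0).host);
    }
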
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 3b2ec6e8571..806d04f20cb 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -72,7 +72,21 @@ var workerThread = (function() {
delete args.sessionOptions.initialOperationTime;
}
- const session = new Mongo(connectionString).startSession(args.sessionOptions);
+ const mongo = new Mongo(connectionString);
+
+ const session = mongo.startSession(args.sessionOptions);
+ const readPreference = session.getOptions().getReadPreference();
+ if (readPreference && readPreference.mode === 'secondary') {
+ // Unset the explicit read preference so set_read_preference_secondary.js can do
+ // the right thing based on the DB.
+ session.getOptions().setReadPreference(undefined);
+
+ // We load() set_read_preference_secondary.js in order to avoid running
+ // commands against the "admin" and "config" databases via mongos with
+ // readPreference={mode: "secondary"} when there's only a single node in
+ // the CSRS.
+ load('jstests/libs/override_methods/set_read_preference_secondary.js');
+ }
if (typeof initialClusterTime !== 'undefined') {
session.advanceClusterTime(initialClusterTime);
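
The worker_thread.js hunk strips an explicit secondary read preference from the session and defers routing to the override file. A sketch of the situation it avoids (the single-node CSRS is an assumption used for illustration):

    // With the preference pinned on the session, even metadata reads would carry
    // readPreference={mode: 'secondary'}; against a one-node CSRS there is no
    // secondary to satisfy them, so such reads can fail or stall.
    const mongo = new Mongo(connectionString);  // connectionString as in the hunk above
    const session = mongo.startSession({causalConsistency: true,
                                        readPreference: {mode: 'secondary'}});
    session.getDatabase('config').runCommand({find: 'collections', limit: 1});
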
diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
index a476eb43860..b6bcf8fd76e 100644
--- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
@@ -31,8 +31,8 @@ var $config = (function() {
shardKey: {_id: 1},
};
- data.makePartition = function makePartition(tid, partitionSize) {
- var partition = {};
+ data.makePartition = function makePartition(ns, tid, partitionSize) {
+ var partition = {ns: ns};
partition.lower = tid * partitionSize;
partition.upper = (tid * partitionSize) + partitionSize;
@@ -67,18 +67,23 @@ var $config = (function() {
// This may be due to SERVER-18341, where the Matcher returns false positives in
// comparison predicates with MinKey/MaxKey.
if (this.partition.isLowChunk && this.partition.isHighChunk) {
- return coll.aggregate([{$sample: {size: 1}}]).toArray()[0];
+ return coll
+ .aggregate([
+ {$match: {ns: this.partition.ns}},
+ {$sample: {size: 1}},
+ ])
+ .toArray()[0];
} else if (this.partition.isLowChunk) {
return coll
.aggregate([
- {$match: {'max._id': {$lte: this.partition.chunkUpper}}},
+ {$match: {ns: this.partition.ns, 'max._id': {$lte: this.partition.chunkUpper}}},
{$sample: {size: 1}}
])
.toArray()[0];
} else if (this.partition.isHighChunk) {
return coll
.aggregate([
- {$match: {'min._id': {$gte: this.partition.chunkLower}}},
+ {$match: {ns: this.partition.ns, 'min._id': {$gte: this.partition.chunkLower}}},
{$sample: {size: 1}}
])
.toArray()[0];
@@ -87,6 +92,7 @@ var $config = (function() {
.aggregate([
{
$match: {
+ ns: this.partition.ns,
'min._id': {$gte: this.partition.chunkLower},
'max._id': {$lte: this.partition.chunkUpper}
}
@@ -105,13 +111,13 @@ var $config = (function() {
// Inform this thread about its partition,
// and verify that its partition is encapsulated in a single chunk.
function init(db, collName, connCache) {
+ var ns = db[collName].getFullName();
+
// Inform this thread about its partition.
// The tid of each thread is assumed to be in the range [0, this.threadCount).
- this.partition = this.makePartition(this.tid, this.partitionSize);
+ this.partition = this.makePartition(ns, this.tid, this.partitionSize);
Object.freeze(this.partition);
- var ns = db[collName].getFullName();
-
// Verify that there is exactly 1 chunk in our partition.
var config = ChunkHelper.getPrimary(connCache.config);
var numChunks = ChunkHelper.getNumChunks(
@@ -147,7 +153,7 @@ var $config = (function() {
for (var tid = 0; tid < this.threadCount; ++tid) {
// Define this thread's partition.
// The tid of each thread is assumed to be in the range [0, this.threadCount).
- var partition = this.makePartition(tid, this.partitionSize);
+ var partition = this.makePartition(ns, tid, this.partitionSize);
// Populate this thread's partition.
var bulk = db[collName].initializeUnorderedBulkOp();
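
The workload changes above scope every config.chunks query by namespace, since an existing cluster's config.chunks holds entries for all sharded collections, not just this workload's. A sketch of the scoped lookup (the choice of config connection is illustrative; the workload itself goes through ChunkHelper):

    // Count only the chunks covering this thread's partition of the workload's
    // own collection; without the 'ns' filter, chunks of other collections match.
    var ns = db[collName].getFullName();
    var configDB = connCache.config[0].getDB('config');  // illustrative config node
    var numChunks = configDB.chunks.find({
        ns: ns,
        'min._id': {$gte: this.partition.lower},
        'max._id': {$lte: this.partition.upper}
    }).itcount();
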
diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
index b7ab427c434..5b4b9ea996d 100644
--- a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
@@ -58,7 +58,8 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.init = function init(db, collName, connCache) {
// Inform this thread about its partition.
// Each thread has tid in range 0..(n-1) where n is the number of threads.
- this.partition = this.makePartition(this.tid, this.partitionSize);
+ this.partition =
+ this.makePartition(db[collName].getFullName(), this.tid, this.partitionSize);
Object.freeze(this.partition);
var config = ChunkHelper.getPrimary(connCache.config);
@@ -90,6 +91,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Skip this iteration if our data partition contains less than 2 chunks.
if (configDB.chunks
.find({
+ ns: ns,
'min._id': {$gte: this.partition.lower},
'max._id': {$lte: this.partition.upper}
})
@@ -101,9 +103,9 @@ var $config = extendWorkload($config, function($config, $super) {
chunk1 = this.getRandomChunkInPartition(config);
// If we randomly chose the last chunk, choose the one before it.
if (chunk1.max._id === this.partition.chunkUpper) {
- chunk1 = configDB.chunks.findOne({'max._id': chunk1.min._id});
+ chunk1 = configDB.chunks.findOne({ns: ns, 'max._id': chunk1.min._id});
}
- chunk2 = configDB.chunks.findOne({'min._id': chunk1.max._id});
+ chunk2 = configDB.chunks.findOne({ns: ns, 'min._id': chunk1.max._id});
// Save the number of documents found in these two chunks' ranges before the mergeChunks
// operation. This will be used to verify that the same number of documents in that
diff --git a/jstests/noPassthrough/shard_fixture_selftest.js b/jstests/noPassthrough/shard_fixture_selftest.js
index 7bbaeeb8874..b4b56ba74d6 100644
--- a/jstests/noPassthrough/shard_fixture_selftest.js
+++ b/jstests/noPassthrough/shard_fixture_selftest.js
@@ -28,6 +28,8 @@
assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
+ assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
+
assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
@@ -46,7 +48,9 @@
assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
- assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
+ assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
+
+ assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
dTestOriginal.stop();
})();
\ No newline at end of file
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index 89eda930e8b..abec36729ea 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -1147,10 +1147,6 @@ DBCollection.prototype.toString = function() {
return this.getFullName();
};
-DBCollection.prototype.toString = function() {
- return this.getFullName();
-};
-
DBCollection.prototype.tojson = DBCollection.prototype.toString;
DBCollection.prototype.shellPrint = DBCollection.prototype.toString;