author     Robert Guo <robert.guo@10gen.com>    2018-04-11 11:05:13 -0400
committer  Robert Guo <robert.guo@10gen.com>    2018-04-30 09:56:57 -0400
commit     39622745cd5258d40924c8e44be73b5c2e1b4ca4 (patch)
tree       cfb986e14083c3736747ced43246e92fa51978b0 /jstests
parent     0b04f8bab03c64477b6ffd60fcd1c592dd4ca2b1 (diff)
SERVER-19630 allow FSM tests to connect to an existing cluster
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/concurrency/fsm_all_sharded_causal_consistency.js              |  98
-rw-r--r--  jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js | 103
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js                                | 166
-rw-r--r--  jstests/concurrency/fsm_libs/fsm.js                                    |  15
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js                         |  31
-rw-r--r--  jstests/concurrency/fsm_libs/shard_fixture.js                          |  10
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js                          |  16
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_base_partitioned.js          |  24
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js   |   8
-rw-r--r--  jstests/noPassthrough/shard_fixture_selftest.js                        |   6
10 files changed, 167 insertions(+), 310 deletions(-)
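
In brief: the FSM concurrency runner can now be pointed at an already-running sharded cluster. Instead of always spinning up its own ShardingTest, cluster.js (when the 'useExistingConnectionAsSeed' option is set) wraps the shell's existing connection in the FSMShardingTest fixture from shard_fixture.js, which discovers the mongos, config, and shard nodes from a connection string. A rough usage sketch in the mongo shell, assuming the shell is already connected to a sharded cluster:

    load('jstests/concurrency/fsm_libs/shard_fixture.js');  // Defines FSMShardingTest.

    // Seed topology discovery from the shell's current connection.
    const st = new FSMShardingTest('mongodb://' + db.getMongo().host);

    // Accessors mirror ShardingTest's s0/c0/shard0 members, but as functions
    // that return undefined once the index runs past the last node.
    const mongos = st.s(0);     // first mongos connection
    const config = st.c(0);     // first config server connection
    const shard = st.shard(0);  // first shard connection
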
diff --git a/jstests/concurrency/fsm_all_sharded_causal_consistency.js b/jstests/concurrency/fsm_all_sharded_causal_consistency.js
deleted file mode 100644
index 6bd389cda22..00000000000
--- a/jstests/concurrency/fsm_all_sharded_causal_consistency.js
+++ /dev/null
@@ -1,98 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- 'reindex_background.js' // TODO SERVER-30983
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {sharded: {enabled: true}, replication: {enabled: true}},
- {sessionOptions: {causalConsistency: true, readPreference: {mode: "secondary"}}});
diff --git a/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js b/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js
deleted file mode 100644
index c192723aa18..00000000000
--- a/jstests/concurrency/fsm_all_sharded_causal_consistency_and_balancer.js
+++ /dev/null
@@ -1,103 +0,0 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/runner.js');
-
-var dir = 'jstests/concurrency/fsm_workloads';
-
-var blacklist = [
- // Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'create_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
-
- // Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- // collections'. This bug is problematic for these workloads because they assert on count()
- // values:
- 'agg_match.js',
-
- // $lookup and $graphLookup are not supported on sharded collections.
- 'agg_graph_lookup.js',
- 'view_catalog_cycle_lookup.js',
-
- // Disabled due to SERVER-20057, 'Concurrent, sharded mapReduces can fail when temporary
- // namespaces collide across mongos processes'
- 'map_reduce_drop.js',
- 'map_reduce_inline.js',
- 'map_reduce_merge.js',
- 'map_reduce_merge_nonatomic.js',
- 'map_reduce_reduce.js',
- 'map_reduce_reduce_nonatomic.js',
- 'map_reduce_replace.js',
- 'map_reduce_replace_nonexistent.js',
- 'map_reduce_replace_remove.js',
-
- // Disabled due to SERVER-13364, 'The geoNear command doesn't handle shard versioning, so a
- // concurrent chunk migration may cause duplicate or missing results'
- 'yield_geo_near_dedup.js',
-
- // Disabled due to MongoDB restrictions and/or workload restrictions
-
- // These workloads sometimes trigger 'Could not lock auth data update lock'
- // errors because the AuthorizationManager currently waits for only five
- // seconds to acquire the lock for authorization documents
- 'auth_create_role.js',
- 'auth_create_user.js',
- 'auth_drop_role.js',
- 'auth_drop_user.js',
-
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_mixed_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_remove_queue_unindexed.js', // findAndModify requires a shard key
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_grow.js', // can cause OOM kills on test hosts
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'findAndModify_update_queue_unindexed.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
-
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
-
- // The rename_* workloads are disabled since renameCollection doesn't work with sharded
- // collections
- 'rename_capped_collection_chain.js',
- 'rename_capped_collection_dbname_chain.js',
- 'rename_capped_collection_dbname_droptarget.js',
- 'rename_capped_collection_droptarget.js',
- 'rename_collection_chain.js',
- 'rename_collection_dbname_chain.js',
- 'rename_collection_dbname_droptarget.js',
- 'rename_collection_droptarget.js',
-
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-
- 'reindex_background.js' // TODO SERVER-30983
-].map(function(file) {
- return dir + '/' + file;
-});
-
-runWorkloadsSerially(
- ls(dir).filter(function(file) {
- return !Array.contains(blacklist, file);
- }),
- {sharded: {enabled: true, enableBalancer: true}, replication: {enabled: true}},
- {sessionOptions: {causalConsistency: true, readPreference: {mode: "secondary"}}});
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 5033e867d63..f925d3ac51f 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -3,7 +3,8 @@
/**
* Represents a MongoDB cluster.
*/
-load('jstests/hooks/validate_collections.js'); // Loads the validateCollections function.
+load('jstests/hooks/validate_collections.js'); // For validateCollections.
+load('jstests/concurrency/fsm_libs/shard_fixture.js'); // For FSMShardingTest.
var Cluster = function(options) {
if (!(this instanceof Cluster)) {
@@ -188,6 +189,7 @@ var Cluster = function(options) {
var conn;
var st;
+ var rawST; // The raw ShardingTest object for test suites not using resmoke fixtures.
var initialized = false;
var clusterStartTime;
@@ -209,49 +211,46 @@ var Cluster = function(options) {
if (options.sharded.enabled) {
if (options.useExistingConnectionAsSeed) {
- // Note that depending on how SERVER-21485 is implemented, it may still not be
- // possible to rehydrate a ShardingTest instance from an existing connection because
- // it wouldn't be possible to discover other mongos processes running in the sharded
- // cluster.
- throw new Error(
- "Cluster cannot support 'useExistingConnectionAsSeed' option until" +
- ' SERVER-21485 is implemented');
- }
+ st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
+ } else {
+ // TODO: allow 'options' to specify the number of shards and mongos processes
+ var shardConfig = {
+ shards: options.sharded.numShards,
+ mongos: options.sharded.numMongos,
+ verbose: verbosityLevel,
+ other: {
+ enableAutoSplit: options.sharded.enableAutoSplit,
+ enableBalancer: options.sharded.enableBalancer,
+ }
+ };
- // TODO: allow 'options' to specify the number of shards and mongos processes
- var shardConfig = {
- shards: options.sharded.numShards,
- mongos: options.sharded.numMongos,
- verbose: verbosityLevel,
- other: {
- enableAutoSplit: options.sharded.enableAutoSplit,
- enableBalancer: options.sharded.enableBalancer,
+ // TODO: allow 'options' to specify an 'rs' config
+ if (options.replication.enabled) {
+ shardConfig.rs = {
+ nodes: makeReplSetTestConfig(options.replication.numNodes,
+ !this.shouldPerformContinuousStepdowns()),
+ // Increase the oplog size (in MB) to prevent rollover
+ // during write-heavy workloads
+ oplogSize: 1024,
+ // Set the electionTimeoutMillis to 1 day to prevent unintended elections
+ settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
+ verbose: verbosityLevel
+ };
+ shardConfig.rsOptions = {};
}
- };
- // TODO: allow 'options' to specify an 'rs' config
- if (options.replication.enabled) {
- shardConfig.rs = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover
- // during write-heavy workloads
- oplogSize: 1024,
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
- verbose: verbosityLevel
- };
- shardConfig.rsOptions = {};
- }
+ if (this.shouldPerformContinuousStepdowns()) {
+ load('jstests/libs/override_methods/continuous_stepdown.js');
+ ContinuousStepdown.configure(options.sharded.stepdownOptions);
+ }
- if (this.shouldPerformContinuousStepdowns()) {
- load('jstests/libs/override_methods/continuous_stepdown.js');
- ContinuousStepdown.configure(options.sharded.stepdownOptions);
- }
+ rawST = new ShardingTest(shardConfig);
+ const hostStr = "mongodb://" + rawST._mongos.map(conn => conn.host).join(",");
- st = new ShardingTest(shardConfig);
+ st = new FSMShardingTest(hostStr);
+ }
- conn = st.s; // mongos
+ conn = st.s(0); // First mongos
this.teardown = function teardown(opts) {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -264,45 +263,50 @@ var Cluster = function(options) {
if (this.shouldPerformContinuousStepdowns()) {
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
}
- st.stop(opts);
+
+ if (!options.useExistingConnectionAsSeed) {
+ rawST.stop(opts);
+ }
};
if (this.shouldPerformContinuousStepdowns()) {
this.startContinuousFailover = function() {
- st.startContinuousFailover();
+ rawST.startContinuousFailover();
};
this.stopContinuousFailover = function() {
- st.stopContinuousFailover({waitForPrimary: true, waitForMongosRetarget: true});
+ rawST.stopContinuousFailover(
+ {waitForPrimary: true, waitForMongosRetarget: true});
+
+ // Call getPrimary() to re-establish the connections in FSMShardingTest
+ // as it is not a transparent proxy for ShardingTest/rawST.
+ st._configsvr.getPrimary();
+ for (let rst of st._shard_rsts) {
+ rst.getPrimary();
+ }
};
}
- // Save all mongos and mongod connections
- var i = 0;
- var mongos = st.s0;
- var mongod = st.d0;
- while (mongos) {
- _conns.mongos.push(mongos);
- ++i;
- mongos = st['s' + i];
+ // Save all mongos, mongod, and ReplSet connections (if any).
+ var i;
+
+ i = 0;
+ while (st.s(i)) {
+ _conns.mongos.push(st.s(i++));
}
- if (options.replication) {
- var rsTest = st.rs0;
-
- i = 0;
- while (rsTest) {
- this._addReplicaSetConns(rsTest);
- replSets.push(rsTest);
- ++i;
- rsTest = st['rs' + i];
- }
+
+ i = 0;
+ while (st.d(i)) {
+ _conns.mongod.push(st.d(i++));
}
+
i = 0;
- while (mongod) {
- _conns.mongod.push(mongod);
- ++i;
- mongod = st['d' + i];
+ while (st.rs(i)) {
+ var rs = st.rs(i++);
+ this._addReplicaSetConns(rs);
+ replSets.push(rs);
}
+
} else if (options.replication.enabled) {
var replSetConfig = {
nodes: makeReplSetTestConfig(options.replication.numNodes,
@@ -390,12 +394,12 @@ var Cluster = function(options) {
}
var configs = [];
- var config = st.c0;
+ var config = st.c(0);
var i = 0;
while (config) {
configs.push(config);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
configs.forEach(function(conn) {
@@ -520,52 +524,40 @@ var Cluster = function(options) {
var cluster = {mongos: [], config: [], shards: {}};
var i = 0;
- var mongos = st.s0;
+ var mongos = st.s(0);
while (mongos) {
cluster.mongos.push(mongos.name);
++i;
- mongos = st['s' + i];
+ mongos = st.s(i);
}
i = 0;
- var config = st.c0;
+ var config = st.c(0);
while (config) {
cluster.config.push(config.name);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
i = 0;
- var shard = st.shard0;
+ var shard = st.shard(0);
while (shard) {
if (shard.name.includes('/')) {
- // If the shard is a replica set, the format of st.shard0.name in ShardingTest is
+ // If the shard is a replica set, the format of st.shard(0).name in ShardingTest is
// "test-rs0/localhost:20006,localhost:20007,localhost:20008".
var [setName, shards] = shard.name.split('/');
cluster.shards[setName] = shards.split(',');
} else {
- // If the shard is a standalone mongod, the format of st.shard0.name in ShardingTest
- // is "localhost:20006".
+ // If the shard is a standalone mongod, the format of st.shard(0).name in
+ // ShardingTest is "localhost:20006".
cluster.shards[shard.shardName] = [shard.name];
}
++i;
- shard = st['shard' + i];
+ shard = st.shard(i);
}
return cluster;
};
- this.startBalancer = function startBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.startBalancer();
- };
-
- this.stopBalancer = function stopBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.stopBalancer();
- };
-
this.isBalancerEnabled = function isBalancerEnabled() {
return this.isSharded() && options.sharded.enableBalancer;
};
@@ -696,7 +688,7 @@ var Cluster = function(options) {
if (this.isSharded()) {
// Get the storage engine the sharded cluster is configured to use from one of the
// shards since mongos won't report it.
- adminDB = st.shard0.getDB('admin');
+ adminDB = st.shard(0).getDB('admin');
}
var res = adminDB.runCommand({getCmdLineOpts: 1});
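
Throughout cluster.js, the direct ShardingTest member lookups (st.s0, st['s' + i], st.shard0, and so on) become FSMShardingTest accessor calls. The accessors return undefined once the index runs past the last node (see shard_fixture_selftest.js below), so the existing discovery loops keep working. For example, collecting the mongos connections is now simply:

    let i = 0;
    while (st.s(i)) {
        _conns.mongos.push(st.s(i++));
    }
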
diff --git a/jstests/concurrency/fsm_libs/fsm.js b/jstests/concurrency/fsm_libs/fsm.js
index e7a3eafb946..0c395bc6c19 100644
--- a/jstests/concurrency/fsm_libs/fsm.js
+++ b/jstests/concurrency/fsm_libs/fsm.js
@@ -21,14 +21,23 @@ var fsm = (function() {
// See fsm_libs/cluster.js for the format of args.cluster.
var connCache;
if (args.passConnectionCache) {
+ // In order to ensure that all operations performed by a worker thread happen on the
+ // same session, we override the "_defaultSession" property of the connections in the
+ // cache to be the same as the session underlying 'args.db'.
+ const makeNewConnWithExistingSession = function(connStr) {
+ const conn = new Mongo(connStr);
+ conn._defaultSession = new _DelegatingDriverSession(conn, args.db.getSession());
+ return conn;
+ };
+
connCache = {mongos: [], config: [], shards: {}};
- connCache.mongos = args.cluster.mongos.map(connStr => new Mongo(connStr));
- connCache.config = args.cluster.config.map(connStr => new Mongo(connStr));
+ connCache.mongos = args.cluster.mongos.map(makeNewConnWithExistingSession);
+ connCache.config = args.cluster.config.map(makeNewConnWithExistingSession);
var shardNames = Object.keys(args.cluster.shards);
shardNames.forEach(name => (connCache.shards[name] = args.cluster.shards[name].map(
- connStr => new Mongo(connStr))));
+ makeNewConnWithExistingSession)));
}
for (var i = 0; i < args.iterations; ++i) {
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index d94fd4e31cc..3187a16bc05 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -174,7 +174,19 @@
clusterOptions.replication.enabled = true;
clusterOptions.replication.numNodes = topology.nodes.length;
} else if (topology.type === Topology.kShardedCluster) {
- throw new Error("resmoke_runner.js doesn't currently support sharded clusters");
+ clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
+ clusterOptions.sharded.enabled = true;
+ clusterOptions.sharded.enableAutoSplit =
+ TestData.hasOwnProperty('runningWithAutoSplit') ? TestData.runningWithAutoSplit : true;
+ clusterOptions.sharded.enableBalancer =
+ TestData.hasOwnProperty('runningWithBalancer') ? TestData.runningWithBalancer : true;
+ clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
+ clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
+ clusterOptions.sharded.stepdownOptions = {};
+ clusterOptions.sharded.stepdownOptions.configStepdown =
+ TestData.runningWithConfigStepdowns || false;
+ clusterOptions.sharded.stepdownOptions.shardStepdown =
+ TestData.runningWithShardStepdowns || false;
} else if (topology.type !== Topology.kStandalone) {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
@@ -184,5 +196,20 @@
workloads = [workloads];
}
- runWorkloads(workloads, {cluster: clusterOptions});
+ let sessionOptions = {};
+ if (TestData.runningWithCausalConsistency) {
+ sessionOptions = Object.assign(
+ sessionOptions, {causalConsistency: true, readPreference: {mode: 'secondary'}});
+ }
+ if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
+ sessionOptions = Object.assign(sessionOptions, {retryWrites: true});
+ }
+
+ const executionOptions = {dbNamePrefix: TestData.dbNamePrefix || ""};
+
+ if (Object.keys(sessionOptions).length > 0) {
+ executionOptions.sessionOptions = sessionOptions;
+ }
+
+ runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
})();
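
On the resmoke_runner.js side, the sharded-cluster options are now derived from the discovered topology, and the session options from TestData flags set by the resmoke suite. A sketch of the mapping, using the flag names from the hunk above with illustrative values:

    // Suppose the suite sets:
    TestData.runningWithCausalConsistency = true;
    TestData.runningWithShardStepdowns = true;

    // Then runWorkloads() receives execution options equivalent to:
    const executionOptions = {
        dbNamePrefix: '',
        sessionOptions: {
            causalConsistency: true,
            readPreference: {mode: 'secondary'},
            retryWrites: true
        }
    };
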
diff --git a/jstests/concurrency/fsm_libs/shard_fixture.js b/jstests/concurrency/fsm_libs/shard_fixture.js
index 807de6d5e52..fb09789dbe1 100644
--- a/jstests/concurrency/fsm_libs/shard_fixture.js
+++ b/jstests/concurrency/fsm_libs/shard_fixture.js
@@ -1,6 +1,6 @@
load('jstests/libs/discover_topology.js');
-class FSMShardingTest {
+var FSMShardingTest = class {
constructor(connStr) {
/**
* `topology` has the following format:
@@ -81,7 +81,11 @@ class FSMShardingTest {
}
d(n = 0) {
- return this.shard(n);
+ // Only return for non-replset shards.
+ if (this._shard_rsts[n] === undefined) {
+ return this._shard_connections[n];
+ }
+ return undefined;
}
/**
@@ -122,4 +126,4 @@ class FSMShardingTest {
/*
* Internal Functions.
*/
-}
+};
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 3b2ec6e8571..806d04f20cb 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -72,7 +72,21 @@ var workerThread = (function() {
delete args.sessionOptions.initialOperationTime;
}
- const session = new Mongo(connectionString).startSession(args.sessionOptions);
+ const mongo = new Mongo(connectionString);
+
+ const session = mongo.startSession(args.sessionOptions);
+ const readPreference = session.getOptions().getReadPreference();
+ if (readPreference && readPreference.mode === 'secondary') {
+ // Unset the explicit read preference so set_read_preference_secondary.js can do
+ // the right thing based on the DB.
+ session.getOptions().setReadPreference(undefined);
+
+ // We load() set_read_preference_secondary.js in order to avoid running
+ // commands against the "admin" and "config" databases via mongos with
+ // readPreference={mode: "secondary"} when there's only a single node in
+ // the CSRS.
+ load('jstests/libs/override_methods/set_read_preference_secondary.js');
+ }
if (typeof initialClusterTime !== 'undefined') {
session.advanceClusterTime(initialClusterTime);
diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
index a476eb43860..b6bcf8fd76e 100644
--- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
@@ -31,8 +31,8 @@ var $config = (function() {
shardKey: {_id: 1},
};
- data.makePartition = function makePartition(tid, partitionSize) {
- var partition = {};
+ data.makePartition = function makePartition(ns, tid, partitionSize) {
+ var partition = {ns: ns};
partition.lower = tid * partitionSize;
partition.upper = (tid * partitionSize) + partitionSize;
@@ -67,18 +67,23 @@ var $config = (function() {
// This may be due to SERVER-18341, where the Matcher returns false positives in
// comparison predicates with MinKey/MaxKey.
if (this.partition.isLowChunk && this.partition.isHighChunk) {
- return coll.aggregate([{$sample: {size: 1}}]).toArray()[0];
+ return coll
+ .aggregate([
+ {$match: {ns: this.partition.ns}},
+ {$sample: {size: 1}},
+ ])
+ .toArray()[0];
} else if (this.partition.isLowChunk) {
return coll
.aggregate([
- {$match: {'max._id': {$lte: this.partition.chunkUpper}}},
+ {$match: {ns: this.partition.ns, 'max._id': {$lte: this.partition.chunkUpper}}},
{$sample: {size: 1}}
])
.toArray()[0];
} else if (this.partition.isHighChunk) {
return coll
.aggregate([
- {$match: {'min._id': {$gte: this.partition.chunkLower}}},
+ {$match: {ns: this.partition.ns, 'min._id': {$gte: this.partition.chunkLower}}},
{$sample: {size: 1}}
])
.toArray()[0];
@@ -87,6 +92,7 @@ var $config = (function() {
.aggregate([
{
$match: {
+ ns: this.partition.ns,
'min._id': {$gte: this.partition.chunkLower},
'max._id': {$lte: this.partition.chunkUpper}
}
@@ -105,13 +111,13 @@ var $config = (function() {
// Inform this thread about its partition,
// and verify that its partition is encapsulated in a single chunk.
function init(db, collName, connCache) {
+ var ns = db[collName].getFullName();
+
// Inform this thread about its partition.
// The tid of each thread is assumed to be in the range [0, this.threadCount).
- this.partition = this.makePartition(this.tid, this.partitionSize);
+ this.partition = this.makePartition(ns, this.tid, this.partitionSize);
Object.freeze(this.partition);
- var ns = db[collName].getFullName();
-
// Verify that there is exactly 1 chunk in our partition.
var config = ChunkHelper.getPrimary(connCache.config);
var numChunks = ChunkHelper.getNumChunks(
@@ -147,7 +153,7 @@ var $config = (function() {
for (var tid = 0; tid < this.threadCount; ++tid) {
// Define this thread's partition.
// The tid of each thread is assumed to be in the range [0, this.threadCount).
- var partition = this.makePartition(tid, this.partitionSize);
+ var partition = this.makePartition(ns, tid, this.partitionSize);
// Populate this thread's partition.
var bulk = db[collName].initializeUnorderedBulkOp();
diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
index b7ab427c434..5b4b9ea996d 100644
--- a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
@@ -58,7 +58,8 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.init = function init(db, collName, connCache) {
// Inform this thread about its partition.
// Each thread has tid in range 0..(n-1) where n is the number of threads.
- this.partition = this.makePartition(this.tid, this.partitionSize);
+ this.partition =
+ this.makePartition(db[collName].getFullName(), this.tid, this.partitionSize);
Object.freeze(this.partition);
var config = ChunkHelper.getPrimary(connCache.config);
@@ -90,6 +91,7 @@ var $config = extendWorkload($config, function($config, $super) {
// Skip this iteration if our data partition contains less than 2 chunks.
if (configDB.chunks
.find({
+ ns: ns,
'min._id': {$gte: this.partition.lower},
'max._id': {$lte: this.partition.upper}
})
@@ -101,9 +103,9 @@ var $config = extendWorkload($config, function($config, $super) {
chunk1 = this.getRandomChunkInPartition(config);
// If we randomly chose the last chunk, choose the one before it.
if (chunk1.max._id === this.partition.chunkUpper) {
- chunk1 = configDB.chunks.findOne({'max._id': chunk1.min._id});
+ chunk1 = configDB.chunks.findOne({ns: ns, 'max._id': chunk1.min._id});
}
- chunk2 = configDB.chunks.findOne({'min._id': chunk1.max._id});
+ chunk2 = configDB.chunks.findOne({ns: ns, 'min._id': chunk1.max._id});
// Save the number of documents found in these two chunks' ranges before the mergeChunks
// operation. This will be used to verify that the same number of documents in that
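
Both partitioned workloads now thread the collection namespace into makePartition() and scope every config.chunks query by it. Presumably this is needed once the suite can run against an existing (possibly shared) cluster, where config.chunks may hold chunks belonging to other collections, so an unscoped lookup could return a foreign chunk. A scoped lookup from the hunk above, for reference:

    var ns = db[collName].getFullName();
    // Find the chunk immediately after chunk1, but only within this collection.
    chunk2 = configDB.chunks.findOne({ns: ns, 'min._id': chunk1.max._id});
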
diff --git a/jstests/noPassthrough/shard_fixture_selftest.js b/jstests/noPassthrough/shard_fixture_selftest.js
index 7bbaeeb8874..b4b56ba74d6 100644
--- a/jstests/noPassthrough/shard_fixture_selftest.js
+++ b/jstests/noPassthrough/shard_fixture_selftest.js
@@ -28,6 +28,8 @@
assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
+ assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
+
assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
@@ -46,7 +48,9 @@
assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
- assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
+ assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
+
+ assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
dTestOriginal.stop();
})();
\ No newline at end of file