author     Robert Guo <robert.guo@10gen.com>  2018-04-11 11:05:13 -0400
committer  Robert Guo <robert.guo@10gen.com>  2018-04-30 09:56:57 -0400
commit     39622745cd5258d40924c8e44be73b5c2e1b4ca4 (patch)
tree       cfb986e14083c3736747ced43246e92fa51978b0 /jstests/concurrency/fsm_libs
parent     0b04f8bab03c64477b6ffd60fcd1c592dd4ca2b1 (diff)
SERVER-19630 allow FSM tests to connect to an existing cluster
Diffstat (limited to 'jstests/concurrency/fsm_libs')
 jstests/concurrency/fsm_libs/cluster.js        | 166
 jstests/concurrency/fsm_libs/fsm.js            |  15
 jstests/concurrency/fsm_libs/resmoke_runner.js |  31
 jstests/concurrency/fsm_libs/shard_fixture.js  |  10
 jstests/concurrency/fsm_libs/worker_thread.js  |  16
 5 files changed, 142 insertions(+), 96 deletions(-)
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 5033e867d63..f925d3ac51f 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -3,7 +3,8 @@
/**
* Represents a MongoDB cluster.
*/
-load('jstests/hooks/validate_collections.js'); // Loads the validateCollections function.
+load('jstests/hooks/validate_collections.js'); // For validateCollections.
+load('jstests/concurrency/fsm_libs/shard_fixture.js'); // For FSMShardingTest.
var Cluster = function(options) {
if (!(this instanceof Cluster)) {
@@ -188,6 +189,7 @@ var Cluster = function(options) {
var conn;
var st;
+ var rawST; // The raw ShardingTest object for test suites not using resmoke fixtures.
var initialized = false;
var clusterStartTime;
@@ -209,49 +211,46 @@ var Cluster = function(options) {
if (options.sharded.enabled) {
if (options.useExistingConnectionAsSeed) {
- // Note that depending on how SERVER-21485 is implemented, it may still not be
- // possible to rehydrate a ShardingTest instance from an existing connection because
- // it wouldn't be possible to discover other mongos processes running in the sharded
- // cluster.
- throw new Error(
- "Cluster cannot support 'useExistingConnectionAsSeed' option until" +
- ' SERVER-21485 is implemented');
- }
+ st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
+ } else {
+ // TODO: allow 'options' to specify the number of shards and mongos processes
+ var shardConfig = {
+ shards: options.sharded.numShards,
+ mongos: options.sharded.numMongos,
+ verbose: verbosityLevel,
+ other: {
+ enableAutoSplit: options.sharded.enableAutoSplit,
+ enableBalancer: options.sharded.enableBalancer,
+ }
+ };
- // TODO: allow 'options' to specify the number of shards and mongos processes
- var shardConfig = {
- shards: options.sharded.numShards,
- mongos: options.sharded.numMongos,
- verbose: verbosityLevel,
- other: {
- enableAutoSplit: options.sharded.enableAutoSplit,
- enableBalancer: options.sharded.enableBalancer,
+ // TODO: allow 'options' to specify an 'rs' config
+ if (options.replication.enabled) {
+ shardConfig.rs = {
+ nodes: makeReplSetTestConfig(options.replication.numNodes,
+ !this.shouldPerformContinuousStepdowns()),
+ // Increase the oplog size (in MB) to prevent rollover
+ // during write-heavy workloads
+ oplogSize: 1024,
+ // Set the electionTimeoutMillis to 1 day to prevent unintended elections
+ settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
+ verbose: verbosityLevel
+ };
+ shardConfig.rsOptions = {};
}
- };
- // TODO: allow 'options' to specify an 'rs' config
- if (options.replication.enabled) {
- shardConfig.rs = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover
- // during write-heavy workloads
- oplogSize: 1024,
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
- verbose: verbosityLevel
- };
- shardConfig.rsOptions = {};
- }
+ if (this.shouldPerformContinuousStepdowns()) {
+ load('jstests/libs/override_methods/continuous_stepdown.js');
+ ContinuousStepdown.configure(options.sharded.stepdownOptions);
+ }
- if (this.shouldPerformContinuousStepdowns()) {
- load('jstests/libs/override_methods/continuous_stepdown.js');
- ContinuousStepdown.configure(options.sharded.stepdownOptions);
- }
+ rawST = new ShardingTest(shardConfig);
+ const hostStr = "mongodb://" + rawST._mongos.map(conn => conn.host).join(",");
- st = new ShardingTest(shardConfig);
+ st = new FSMShardingTest(hostStr);
+ }
- conn = st.s; // mongos
+ conn = st.s(0); // First mongos
this.teardown = function teardown(opts) {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -264,45 +263,50 @@ var Cluster = function(options) {
if (this.shouldPerformContinuousStepdowns()) {
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
}
- st.stop(opts);
+
+ if (!options.useExistingConnectionAsSeed) {
+ rawST.stop(opts);
+ }
};
if (this.shouldPerformContinuousStepdowns()) {
this.startContinuousFailover = function() {
- st.startContinuousFailover();
+ rawST.startContinuousFailover();
};
this.stopContinuousFailover = function() {
- st.stopContinuousFailover({waitForPrimary: true, waitForMongosRetarget: true});
+ rawST.stopContinuousFailover(
+ {waitForPrimary: true, waitForMongosRetarget: true});
+
+ // Call getPrimary() to re-establish the connections in FSMShardingTest
+ // as it is not a transparent proxy for ShardingTest/rawST.
+ st._configsvr.getPrimary();
+ for (let rst of st._shard_rsts) {
+ rst.getPrimary();
+ }
};
}
- // Save all mongos and mongod connections
- var i = 0;
- var mongos = st.s0;
- var mongod = st.d0;
- while (mongos) {
- _conns.mongos.push(mongos);
- ++i;
- mongos = st['s' + i];
+ // Save all mongos, mongod, and ReplSet connections (if any).
+ var i;
+
+ i = 0;
+ while (st.s(i)) {
+ _conns.mongos.push(st.s(i++));
}
- if (options.replication) {
- var rsTest = st.rs0;
-
- i = 0;
- while (rsTest) {
- this._addReplicaSetConns(rsTest);
- replSets.push(rsTest);
- ++i;
- rsTest = st['rs' + i];
- }
+
+ i = 0;
+ while (st.d(i)) {
+ _conns.mongod.push(st.d(i++));
}
+
i = 0;
- while (mongod) {
- _conns.mongod.push(mongod);
- ++i;
- mongod = st['d' + i];
+ while (st.rs(i)) {
+ var rs = st.rs(i++);
+ this._addReplicaSetConns(rs);
+ replSets.push(rs);
}
+
} else if (options.replication.enabled) {
var replSetConfig = {
nodes: makeReplSetTestConfig(options.replication.numNodes,
@@ -390,12 +394,12 @@ var Cluster = function(options) {
}
var configs = [];
- var config = st.c0;
+ var config = st.c(0);
var i = 0;
while (config) {
configs.push(config);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
configs.forEach(function(conn) {
@@ -520,52 +524,40 @@ var Cluster = function(options) {
var cluster = {mongos: [], config: [], shards: {}};
var i = 0;
- var mongos = st.s0;
+ var mongos = st.s(0);
while (mongos) {
cluster.mongos.push(mongos.name);
++i;
- mongos = st['s' + i];
+ mongos = st.s(i);
}
i = 0;
- var config = st.c0;
+ var config = st.c(0);
while (config) {
cluster.config.push(config.name);
++i;
- config = st['c' + i];
+ config = st.c(i);
}
i = 0;
- var shard = st.shard0;
+ var shard = st.shard(0);
while (shard) {
if (shard.name.includes('/')) {
- // If the shard is a replica set, the format of st.shard0.name in ShardingTest is
+ // If the shard is a replica set, the format of st.shard(0).name in ShardingTest is
// "test-rs0/localhost:20006,localhost:20007,localhost:20008".
var [setName, shards] = shard.name.split('/');
cluster.shards[setName] = shards.split(',');
} else {
- // If the shard is a standalone mongod, the format of st.shard0.name in ShardingTest
- // is "localhost:20006".
+ // If the shard is a standalone mongod, the format of st.shard(0).name in
+ // ShardingTest is "localhost:20006".
cluster.shards[shard.shardName] = [shard.name];
}
++i;
- shard = st['shard' + i];
+ shard = st.shard(i);
}
return cluster;
};
- this.startBalancer = function startBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.startBalancer();
- };
-
- this.stopBalancer = function stopBalancer() {
- assert(initialized, 'cluster must be initialized first');
- assert(this.isSharded(), 'cluster is not sharded');
- st.stopBalancer();
- };
-
this.isBalancerEnabled = function isBalancerEnabled() {
return this.isSharded() && options.sharded.enableBalancer;
};
@@ -696,7 +688,7 @@ var Cluster = function(options) {
if (this.isSharded()) {
// Get the storage engine the sharded cluster is configured to use from one of the
// shards since mongos won't report it.
- adminDB = st.shard0.getDB('admin');
+ adminDB = st.shard(0).getDB('admin');
}
var res = adminDB.runCommand({getCmdLineOpts: 1});
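
The accessor-style lookups above (st.s(i), st.c(i), st.d(i), st.rs(i), st.shard(i)) replace the old st.s0/st['s' + i] property walk on ShardingTest. A minimal sketch of the iteration idiom against an already-running cluster; the seed address is hypothetical:

    // Sketch only: assumes a sharded cluster is reachable at this
    // hypothetical seed address.
    load('jstests/concurrency/fsm_libs/shard_fixture.js');

    const st = new FSMShardingTest('mongodb://localhost:20007');

    // Each accessor returns undefined past the last member, so the
    // counts never need to be known up front.
    let i = 0;
    const mongosConns = [];
    while (st.s(i)) {
        mongosConns.push(st.s(i++));
    }

    i = 0;
    const shardReplSets = [];
    while (st.rs(i)) {
        shardReplSets.push(st.rs(i++));
    }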
diff --git a/jstests/concurrency/fsm_libs/fsm.js b/jstests/concurrency/fsm_libs/fsm.js
index e7a3eafb946..0c395bc6c19 100644
--- a/jstests/concurrency/fsm_libs/fsm.js
+++ b/jstests/concurrency/fsm_libs/fsm.js
@@ -21,14 +21,23 @@ var fsm = (function() {
// See fsm_libs/cluster.js for the format of args.cluster.
var connCache;
if (args.passConnectionCache) {
+ // In order to ensure that all operations performed by a worker thread happen on the
+ // same session, we override the "_defaultSession" property of the connections in the
+ // cache to be the same as the session underlying 'args.db'.
+ const makeNewConnWithExistingSession = function(connStr) {
+ const conn = new Mongo(connStr);
+ conn._defaultSession = new _DelegatingDriverSession(conn, args.db.getSession());
+ return conn;
+ };
+
connCache = {mongos: [], config: [], shards: {}};
- connCache.mongos = args.cluster.mongos.map(connStr => new Mongo(connStr));
- connCache.config = args.cluster.config.map(connStr => new Mongo(connStr));
+ connCache.mongos = args.cluster.mongos.map(makeNewConnWithExistingSession);
+ connCache.config = args.cluster.config.map(makeNewConnWithExistingSession);
var shardNames = Object.keys(args.cluster.shards);
shardNames.forEach(name => (connCache.shards[name] = args.cluster.shards[name].map(
- connStr => new Mongo(connStr))));
+ makeNewConnWithExistingSession)));
}
for (var i = 0; i < args.iterations; ++i) {
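
The makeNewConnWithExistingSession() override above is the heart of the fsm.js change: every connection in the worker's cache is pinned to the session underlying args.db. A hedged sketch of the mechanism in isolation, using the shell-internal _DelegatingDriverSession type the diff itself relies on (host addresses are illustrative):

    // Sketch only: host addresses are illustrative.
    const seedConn = new Mongo('mongodb://localhost:20007');
    const session = seedConn.startSession({causalConsistency: true});

    // A second connection whose default session delegates to the first,
    // so both connections stamp commands with the same logical session id.
    const cachedConn = new Mongo('mongodb://localhost:20008');
    cachedConn._defaultSession = new _DelegatingDriverSession(cachedConn, session);

    // A read through 'cachedConn' can now causally observe a write made
    // through 'seedConn'.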
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index d94fd4e31cc..3187a16bc05 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -174,7 +174,19 @@
clusterOptions.replication.enabled = true;
clusterOptions.replication.numNodes = topology.nodes.length;
} else if (topology.type === Topology.kShardedCluster) {
- throw new Error("resmoke_runner.js doesn't currently support sharded clusters");
+ clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
+ clusterOptions.sharded.enabled = true;
+ clusterOptions.sharded.enableAutoSplit =
+ TestData.hasOwnProperty('runningWithAutoSplit') ? TestData.runningWithAutoSplit : true;
+ clusterOptions.sharded.enableBalancer =
+ TestData.hasOwnProperty('runningWithBalancer') ? TestData.runningWithBalancer : true;
+ clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
+ clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
+ clusterOptions.sharded.stepdownOptions = {};
+ clusterOptions.sharded.stepdownOptions.configStepdown =
+ TestData.runningWithConfigStepdowns || false;
+ clusterOptions.sharded.stepdownOptions.shardStepdown =
+ TestData.runningWithShardStepdowns || false;
} else if (topology.type !== Topology.kStandalone) {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
@@ -184,5 +196,20 @@
workloads = [workloads];
}
- runWorkloads(workloads, {cluster: clusterOptions});
+ let sessionOptions = {};
+ if (TestData.runningWithCausalConsistency) {
+ sessionOptions = Object.assign(
+ sessionOptions, {causalConsistency: true, readPreference: {mode: 'secondary'}});
+ }
+ if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
+ sessionOptions = Object.assign(sessionOptions, {retryWrites: true});
+ }
+
+ const executionOptions = {dbNamePrefix: TestData.dbNamePrefix || ""};
+
+ if (Object.keys(sessionOptions).length > 0) {
+ executionOptions.sessionOptions = sessionOptions;
+ }
+
+ runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
})();
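
For concreteness, this is roughly what the runner assembles for a suite flagged with causal consistency and shard stepdowns; the TestData values here are illustrative, not taken from any real suite definition:

    // Illustrative flags; real suites set these via resmoke.
    TestData.runningWithCausalConsistency = true;
    TestData.runningWithShardStepdowns = true;

    // Following the logic above, runWorkloads() would receive:
    const executionOptions = {
        dbNamePrefix: '',
        sessionOptions: {
            causalConsistency: true,
            readPreference: {mode: 'secondary'},
            retryWrites: true
        }
    };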
diff --git a/jstests/concurrency/fsm_libs/shard_fixture.js b/jstests/concurrency/fsm_libs/shard_fixture.js
index 807de6d5e52..fb09789dbe1 100644
--- a/jstests/concurrency/fsm_libs/shard_fixture.js
+++ b/jstests/concurrency/fsm_libs/shard_fixture.js
@@ -1,6 +1,6 @@
load('jstests/libs/discover_topology.js');
-class FSMShardingTest {
+var FSMShardingTest = class {
constructor(connStr) {
/**
* `topology` has the following format:
@@ -81,7 +81,11 @@ class FSMShardingTest {
}
d(n = 0) {
- return this.shard(n);
+ // Only return for non-replset shards.
+ if (this._shard_rsts[n] === undefined) {
+ return this._shard_connections[n];
+ }
+ return undefined;
}
/**
@@ -122,4 +126,4 @@ class FSMShardingTest {
/*
* Internal Functions.
*/
-}
+};
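
The switch from a class declaration to a class expression bound with var looks cosmetic but is likely motivated by load() semantics: cluster.js now load()s shard_fixture.js, and a lexical class binding would throw a redeclaration error if the file were ever evaluated twice in the same scope, while a var binding is simply re-assigned. A small sketch of the difference (the class body is a stand-in):

    // Evaluating a declaration twice in one scope is an error:
    //   class FSMShardingTest {}   // first evaluation: ok
    //   class FSMShardingTest {}   // second evaluation: redeclaration error
    //
    // Evaluating the var form twice just re-binds the name:
    var FSMShardingTest = class {
        constructor(connStr) {
            this._connStr = connStr;  // stand-in body
        }
    };
    var FSMShardingTest = class {};  // no error on re-evaluation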
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 3b2ec6e8571..806d04f20cb 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -72,7 +72,21 @@ var workerThread = (function() {
delete args.sessionOptions.initialOperationTime;
}
- const session = new Mongo(connectionString).startSession(args.sessionOptions);
+ const mongo = new Mongo(connectionString);
+
+ const session = mongo.startSession(args.sessionOptions);
+ const readPreference = session.getOptions().getReadPreference();
+ if (readPreference && readPreference.mode === 'secondary') {
+ // Unset the explicit read preference so set_read_preference_secondary.js can do
+ // the right thing based on the DB.
+ session.getOptions().setReadPreference(undefined);
+
+ // We load() set_read_preference_secondary.js in order to avoid running
+ // commands against the "admin" and "config" databases via mongos with
+ // readPreference={mode: "secondary"} when there's only a single node in
+ // the CSRS.
+ load('jstests/libs/override_methods/set_read_preference_secondary.js');
+ }
if (typeof initialClusterTime !== 'undefined') {
session.advanceClusterTime(initialClusterTime);
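
Taken together, the worker_thread.js change gives each worker its own connection and session, then defers secondary reads to the override file. A hedged sketch of that setup path (connection string illustrative):

    // Sketch only: connection string is illustrative.
    const mongo = new Mongo('mongodb://localhost:20007');
    const session = mongo.startSession(
        {causalConsistency: true, readPreference: {mode: 'secondary'}});

    const readPreference = session.getOptions().getReadPreference();
    if (readPreference && readPreference.mode === 'secondary') {
        // Clear the explicit preference; the loaded override then applies
        // a secondary read preference per database, avoiding "admin" and
        // "config" reads that a single-node CSRS could not serve.
        session.getOptions().setReadPreference(undefined);
        load('jstests/libs/override_methods/set_read_preference_secondary.js');
    }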