Diffstat (limited to 'jstests/concurrency/fsm_libs')
-rw-r--r--  jstests/concurrency/fsm_libs/assert.js             2
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js           15
-rw-r--r--  jstests/concurrency/fsm_libs/composer.js           2
-rw-r--r--  jstests/concurrency/fsm_libs/extend_workload.js    3
-rw-r--r--  jstests/concurrency/fsm_libs/parse_config.js       4
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js   493
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js            14
-rw-r--r--  jstests/concurrency/fsm_libs/worker_thread.js      4
8 files changed, 264 insertions, 273 deletions
diff --git a/jstests/concurrency/fsm_libs/assert.js b/jstests/concurrency/fsm_libs/assert.js
index 437742ac396..f4b47acc0fe 100644
--- a/jstests/concurrency/fsm_libs/assert.js
+++ b/jstests/concurrency/fsm_libs/assert.js
@@ -12,7 +12,6 @@
*/
var AssertLevel = (function() {
-
function AssertLevel(level) {
this.level = level;
@@ -34,7 +33,6 @@ var AssertLevel = (function() {
OWN_DB: new AssertLevel(2),
isAssertLevel: isAssertLevel
};
-
})();
if (typeof globalAssertLevel === 'undefined') {
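The AssertLevel machinery above lets workloads scale assertion strictness to how isolated their namespace is, with OWN_DB being the strictest level (2). A minimal sketch of a level-gated wrapper built on globalAssertLevel; the wrapper name and its gating rule are illustrative assumptions, not the library's actual helpers:

    // Hypothetical helper: run the assertion only when the global level is at
    // least as strict as the level this assertion was written for.
    function assertAtLevel(level, cond, msg) {
        assert(AssertLevel.isAssertLevel(level), 'expected an AssertLevel instance');
        if (globalAssertLevel.level >= level.level) {
            assert(cond, msg);
        }
    }

    // Fires only when each workload owns its own database.
    assertAtLevel(AssertLevel.OWN_DB,
                  db.mycoll.find().itcount() === 0,
                  'expected an empty collection');
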
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index b848fa355ed..1b24c9bbdbb 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -53,8 +53,8 @@ var Cluster = function(options) {
getObjectKeys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
options.replication = options.replication || {};
@@ -271,7 +271,7 @@ var Cluster = function(options) {
this.executeOnMongodNodes = function executeOnMongodNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('mongod function must be a function that takes a db as an argument');
}
_conns.mongod.forEach(function(mongodConn) {
@@ -282,7 +282,7 @@ var Cluster = function(options) {
this.executeOnMongosNodes = function executeOnMongosNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('mongos function must be a function that takes a db as an argument');
}
_conns.mongos.forEach(function(mongosConn) {
@@ -293,7 +293,7 @@ var Cluster = function(options) {
this.executeOnConfigNodes = function executeOnConfigNodes(fn) {
assert(initialized, 'cluster must be initialized first');
- if (!fn || typeof(fn) !== 'function' || fn.length !== 1) {
+ if (!fn || typeof (fn) !== 'function' || fn.length !== 1) {
throw new Error('config function must be a function that takes a db as an argument');
}
st._configServers.forEach(function(conn) {
@@ -553,8 +553,8 @@ var Cluster = function(options) {
var primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
assert(primaryInfo !== undefined,
- phase + ', failed to find self in replication status: ' +
- tojson(replSetStatus));
+ phase +
+ ', failed to find self in replication status: ' + tojson(replSetStatus));
// Wait for all previous workload operations to complete, with "getLastError".
res = primary.getDB('test').runCommand({
@@ -565,7 +565,6 @@ var Cluster = function(options) {
});
assert.commandWorked(res, phase + ', error awaiting replication');
}
-
});
};
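Note that the executeOn*Nodes helpers above deliberately reject any callback whose arity is not exactly one (the fn.length !== 1 check), so callers must pass a unary function. A usage sketch, assuming an initialized cluster and that the handle passed to the callback behaves like an ordinary db object:

    var cluster = new Cluster({replication: {enabled: true}});
    cluster.setup();  // must run before any executeOn*Nodes call

    // OK: unary callback, invoked once per mongod connection.
    cluster.executeOnMongodNodes(function(db) {
        assert.commandWorked(db.adminCommand({serverStatus: 1}));
    });

    // Would throw 'mongod function must be a function that takes a db as an
    // argument', because the callback's arity is 0:
    // cluster.executeOnMongodNodes(function() {});
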
diff --git a/jstests/concurrency/fsm_libs/composer.js b/jstests/concurrency/fsm_libs/composer.js
index 99cfb64f34d..22d78e77871 100644
--- a/jstests/concurrency/fsm_libs/composer.js
+++ b/jstests/concurrency/fsm_libs/composer.js
@@ -1,7 +1,6 @@
load('jstests/concurrency/fsm_libs/fsm.js');
var composer = (function() {
-
function runCombinedFSM(workloads, configs, mixProb) {
// TODO: what if a workload depends on iterations?
var iterations = 100;
@@ -70,5 +69,4 @@ var composer = (function() {
}
return {run: runCombinedFSM};
-
})();
diff --git a/jstests/concurrency/fsm_libs/extend_workload.js b/jstests/concurrency/fsm_libs/extend_workload.js
index 2b6fe47ed4e..84d094cb36e 100644
--- a/jstests/concurrency/fsm_libs/extend_workload.js
+++ b/jstests/concurrency/fsm_libs/extend_workload.js
@@ -2,7 +2,8 @@
load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
-/** extendWorkload usage:
+/**
+ * extendWorkload usage:
*
* $config = extendWorkload($config, function($config, $super) {
* // ... modify $config ...
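The usage comment above is cut off by the hunk boundary; a fuller sketch of the extendWorkload pattern it describes, with a hypothetical overridden state name (the base workload is assumed to define threadCount and an insert state):

    load('jstests/concurrency/fsm_libs/extend_workload.js');  // for extendWorkload

    $config = extendWorkload($config, function($config, $super) {
        // Scale up the parent workload's thread count.
        $config.threadCount = $super.threadCount * 2;

        // Override a state, delegating to the parent's version first.
        $config.states.insert = function insert(db, collName) {
            $super.states.insert.apply(this, arguments);
            assert.gt(db[collName].find().itcount(), 0);
        };

        return $config;
    });
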
diff --git a/jstests/concurrency/fsm_libs/parse_config.js b/jstests/concurrency/fsm_libs/parse_config.js
index 3c365dc5f4c..3d673e6c062 100644
--- a/jstests/concurrency/fsm_libs/parse_config.js
+++ b/jstests/concurrency/fsm_libs/parse_config.js
@@ -61,8 +61,8 @@ function parseConfig(config) {
assert.gt(Object.keys(config.transitions[fromState]).length, 0);
Object.keys(config.transitions[fromState]).forEach(function(toState) {
assert(config.states.hasOwnProperty(toState),
- 'config.transitions.' + fromState + ' contains a state not in config.states: ' +
- toState);
+ 'config.transitions.' + fromState +
+ ' contains a state not in config.states: ' + toState);
assert.eq('number',
typeof config.transitions[fromState][toState],
'transitions.' + fromState + '.' + toState + ' should be a number');
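The checks above require every transition target to name a state in config.states and every transition weight to be a number. A minimal $config fragment that satisfies them, assuming parseConfig's defaults cover the fields omitted here; the state bodies are illustrative:

    var $config = {
        threadCount: 5,
        iterations: 20,
        states: {
            init: function init(db, collName) {
                db[collName].insert({tid: this.tid});
            },
            query: function query(db, collName) {
                assert.lte(db[collName].find().itcount(), this.threadCount);
            },
        },
        // Each entry maps a from-state to {toState: numericWeight}; every
        // toState key below must also be a key of `states`.
        transitions: {
            init: {query: 1},
            query: {query: 0.8, init: 0.2},
        },
    };
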
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index 908eb126cb0..75569dde140 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -1,285 +1,284 @@
(function() {
- 'use strict';
+'use strict';
+
+load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+
+const validateExecutionOptions = runner.internals.validateExecutionOptions;
+const prepareCollections = runner.internals.prepareCollections;
+const WorkloadFailure = runner.internals.WorkloadFailure;
+const throwError = runner.internals.throwError;
+const setupWorkload = runner.internals.setupWorkload;
+const teardownWorkload = runner.internals.teardownWorkload;
+const setIterations = runner.internals.setIterations;
+const setThreadCount = runner.internals.setThreadCount;
+const loadWorkloadContext = runner.internals.loadWorkloadContext;
+
+// Returns true if the workload's teardown succeeds and false if the workload's teardown fails.
+function cleanupWorkload(workload, context, cluster, errors, header) {
+ const phase = 'before workload ' + workload + ' teardown';
+
+ try {
+ teardownWorkload(workload, context, cluster);
+ } catch (e) {
+ errors.push(new WorkloadFailure(e.toString(), e.stack, 'main', header + ' Teardown'));
+ return false;
+ }
- load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+ return true;
+}
- const validateExecutionOptions = runner.internals.validateExecutionOptions;
- const prepareCollections = runner.internals.prepareCollections;
- const WorkloadFailure = runner.internals.WorkloadFailure;
- const throwError = runner.internals.throwError;
- const setupWorkload = runner.internals.setupWorkload;
- const teardownWorkload = runner.internals.teardownWorkload;
- const setIterations = runner.internals.setIterations;
- const setThreadCount = runner.internals.setThreadCount;
- const loadWorkloadContext = runner.internals.loadWorkloadContext;
+function runWorkloads(workloads,
+ {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
+ assert.gt(workloads.length, 0, 'need at least one workload to run');
- // Returns true if the workload's teardown succeeds and false if the workload's teardown fails.
- function cleanupWorkload(workload, context, cluster, errors, header) {
- const phase = 'before workload ' + workload + ' teardown';
+ const executionMode = {serial: true};
+ validateExecutionOptions(executionMode, executionOptions);
+ Object.freeze(executionOptions); // immutable after validation (and normalization)
- try {
- teardownWorkload(workload, context, cluster);
- } catch (e) {
- errors.push(new WorkloadFailure(e.toString(), e.stack, 'main', header + ' Teardown'));
- return false;
- }
-
- return true;
+ // Determine how strong to make assertions while simultaneously executing different
+ // workloads.
+ let assertLevel = AssertLevel.OWN_DB;
+ if (clusterOptions.sameDB) {
+ // The database is shared by multiple workloads, so only make the asserts that apply
+ // when the collection is owned by an individual workload.
+ assertLevel = AssertLevel.OWN_COLL;
}
+ if (clusterOptions.sameCollection) {
+ // The collection is shared by multiple workloads, so only make the asserts that always
+ // apply.
+ assertLevel = AssertLevel.ALWAYS;
+ }
+ globalAssertLevel = assertLevel;
- function runWorkloads(workloads,
- {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
- assert.gt(workloads.length, 0, 'need at least one workload to run');
-
- const executionMode = {serial: true};
- validateExecutionOptions(executionMode, executionOptions);
- Object.freeze(executionOptions); // immutable after validation (and normalization)
-
- // Determine how strong to make assertions while simultaneously executing different
- // workloads.
- let assertLevel = AssertLevel.OWN_DB;
- if (clusterOptions.sameDB) {
- // The database is shared by multiple workloads, so only make the asserts that apply
- // when the collection is owned by an individual workload.
- assertLevel = AssertLevel.OWN_COLL;
- }
- if (clusterOptions.sameCollection) {
- // The collection is shared by multiple workloads, so only make the asserts that always
- // apply.
- assertLevel = AssertLevel.ALWAYS;
- }
- globalAssertLevel = assertLevel;
-
- const context = {};
- const applyMultipliers = true;
- loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
+ const context = {};
+ const applyMultipliers = true;
+ loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
- // Constructing a Cluster instance calls its internal validateClusterOptions() function,
- // which fills in any properties that aren't explicitly present in 'clusterOptions'. We do
- // this before constructing a ThreadManager instance to make its dependency on the
- // 'clusterOptions' being filled in explicit.
- const cluster = new Cluster(clusterOptions);
- const threadMgr = new ThreadManager(clusterOptions, executionMode);
+ // Constructing a Cluster instance calls its internal validateClusterOptions() function,
+ // which fills in any properties that aren't explicitly present in 'clusterOptions'. We do
+ // this before constructing a ThreadManager instance to make its dependency on the
+ // 'clusterOptions' being filled in explicit.
+ const cluster = new Cluster(clusterOptions);
+ const threadMgr = new ThreadManager(clusterOptions, executionMode);
- Random.setRandomSeed(clusterOptions.seed);
+ Random.setRandomSeed(clusterOptions.seed);
- const errors = [];
- const cleanup = [];
- let teardownFailed = false;
- let startTime = Date.now(); // Initialize in case setupWorkload fails below.
- let totalTime;
+ const errors = [];
+ const cleanup = [];
+ let teardownFailed = false;
+ let startTime = Date.now(); // Initialize in case setupWorkload fails below.
+ let totalTime;
- cluster.setup();
+ cluster.setup();
- jsTest.log('Workload(s) started: ' + workloads.join(' '));
+ jsTest.log('Workload(s) started: ' + workloads.join(' '));
- prepareCollections(workloads, context, cluster, clusterOptions, executionOptions);
+ prepareCollections(workloads, context, cluster, clusterOptions, executionOptions);
- try {
- // Set up the thread manager for this set of workloads.
- startTime = Date.now();
+ try {
+ // Set up the thread manager for this set of workloads.
+ startTime = Date.now();
- {
- const maxAllowedThreads = 100 * executionOptions.threadMultiplier;
- threadMgr.init(workloads, context, maxAllowedThreads);
- }
+ {
+ const maxAllowedThreads = 100 * executionOptions.threadMultiplier;
+ threadMgr.init(workloads, context, maxAllowedThreads);
+ }
- // Call each workload's setup function.
- workloads.forEach(function(workload) {
- // Define "iterations" and "threadCount" properties on the workload's $config.data
- // object so that they can be used within its setup(), teardown(), and state
- // functions. This must happen after calling threadMgr.init() in case the thread
- // counts needed to be scaled down.
- setIterations(context[workload].config);
- setThreadCount(context[workload].config);
-
- setupWorkload(workload, context, cluster);
- cleanup.push(workload);
- });
-
- // Await replication after running the $config.setup() function when stepdowns are
- // permitted to ensure its effects aren't rolled back.
- if (cluster.isReplication() && executionOptions.stepdownFiles !== undefined) {
- cluster.awaitReplication();
- }
+ // Call each workload's setup function.
+ workloads.forEach(function(workload) {
+ // Define "iterations" and "threadCount" properties on the workload's $config.data
+ // object so that they can be used within its setup(), teardown(), and state
+ // functions. This must happen after calling threadMgr.init() in case the thread
+ // counts needed to be scaled down.
+ setIterations(context[workload].config);
+ setThreadCount(context[workload].config);
+
+ setupWorkload(workload, context, cluster);
+ cleanup.push(workload);
+ });
+
+ // Await replication after running the $config.setup() function when stepdowns are
+ // permitted to ensure its effects aren't rolled back.
+ if (cluster.isReplication() && executionOptions.stepdownFiles !== undefined) {
+ cluster.awaitReplication();
+ }
- // Transactions run at snapshot read concern unless defaultTransactionReadConcernLevel
- // is set to another level.
- const transactionsWouldUseSnapshotReadConcern =
- !TestData.hasOwnProperty("defaultTransactionReadConcernLevel") ||
- TestData.defaultTransactionReadConcernLevel === "snapshot";
-
- // Synchronize the cluster times across all routers if the tests will be overridden to
- // use transactions, so the earliest global snapshots chosen by each router will include
- // the effects of each setup function. This is only required for snapshot read concern.
- if (cluster.isSharded() && TestData.runInsideTransaction &&
- transactionsWouldUseSnapshotReadConcern) {
- cluster.synchronizeMongosClusterTimes();
- }
+ // Transactions run at snapshot read concern unless defaultTransactionReadConcernLevel
+ // is set to another level.
+ const transactionsWouldUseSnapshotReadConcern =
+ !TestData.hasOwnProperty("defaultTransactionReadConcernLevel") ||
+ TestData.defaultTransactionReadConcernLevel === "snapshot";
+
+ // Synchronize the cluster times across all routers if the tests will be overridden to
+ // use transactions, so the earliest global snapshots chosen by each router will include
+ // the effects of each setup function. This is only required for snapshot read concern.
+ if (cluster.isSharded() && TestData.runInsideTransaction &&
+ transactionsWouldUseSnapshotReadConcern) {
+ cluster.synchronizeMongosClusterTimes();
+ }
- // After the $config.setup() function has been called, it is safe for the stepdown
- // thread to start running. The main thread won't attempt to interact with the cluster
- // until all of the spawned worker threads have finished.
- //
+ // After the $config.setup() function has been called, it is safe for the stepdown
+ // thread to start running. The main thread won't attempt to interact with the cluster
+ // until all of the spawned worker threads have finished.
+ //
- // Indicate that the stepdown thread can run. It is unnecessary for the stepdown thread
- // to indicate that it is going to start running because it will eventually after the
- // worker threads have started.
- if (executionOptions.stepdownFiles !== undefined) {
- writeFile(executionOptions.stepdownFiles.permitted, '');
- }
+ // Indicate that the stepdown thread can run. It is unnecessary for the stepdown thread
+ // to indicate that it is going to start running because it will eventually after the
+ // worker threads have started.
+ if (executionOptions.stepdownFiles !== undefined) {
+ writeFile(executionOptions.stepdownFiles.permitted, '');
+ }
- // Since the worker threads may be running with causal consistency enabled, we set the
- // initial clusterTime and initial operationTime for the sessions they'll create so that
- // they are guaranteed to observe the effects of the workload's $config.setup() function
- // being called.
- if (typeof executionOptions.sessionOptions === 'object' &&
- executionOptions.sessionOptions !== null) {
- // We only start a session for the worker threads and never start one for the main
- // thread. We can therefore get the clusterTime and operationTime tracked by the
- // underlying DummyDriverSession through any DB instance (i.e. the "test" database
- // here was chosen arbitrarily).
- const session = cluster.getDB('test').getSession();
-
- // JavaScript objects backed by C++ objects (e.g. BSON values from a command
- // response) do not serialize correctly when passed through the ScopedThread
- // constructor. To work around this behavior, we instead pass a stringified form of
- // the JavaScript object through the ScopedThread constructor and use eval() to
- // rehydrate it.
- executionOptions.sessionOptions.initialClusterTime =
- tojson(session.getClusterTime());
- executionOptions.sessionOptions.initialOperationTime =
- tojson(session.getOperationTime());
- }
+ // Since the worker threads may be running with causal consistency enabled, we set the
+ // initial clusterTime and initial operationTime for the sessions they'll create so that
+ // they are guaranteed to observe the effects of the workload's $config.setup() function
+ // being called.
+ if (typeof executionOptions.sessionOptions === 'object' &&
+ executionOptions.sessionOptions !== null) {
+ // We only start a session for the worker threads and never start one for the main
+ // thread. We can therefore get the clusterTime and operationTime tracked by the
+ // underlying DummyDriverSession through any DB instance (i.e. the "test" database
+ // here was chosen arbitrarily).
+ const session = cluster.getDB('test').getSession();
+
+ // JavaScript objects backed by C++ objects (e.g. BSON values from a command
+ // response) do not serialize correctly when passed through the ScopedThread
+ // constructor. To work around this behavior, we instead pass a stringified form of
+ // the JavaScript object through the ScopedThread constructor and use eval() to
+ // rehydrate it.
+ executionOptions.sessionOptions.initialClusterTime = tojson(session.getClusterTime());
+ executionOptions.sessionOptions.initialOperationTime =
+ tojson(session.getOperationTime());
+ }
+ try {
try {
- try {
- // Start this set of worker threads.
- threadMgr.spawnAll(cluster, executionOptions);
- // Allow 20% of the threads to fail. This allows the workloads to run on
- // underpowered test hosts.
- threadMgr.checkFailed(0.2);
- } finally {
- // Threads must be joined before destruction, so do this even in the presence of
- // exceptions.
- errors.push(...threadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
- }
+ // Start this set of worker threads.
+ threadMgr.spawnAll(cluster, executionOptions);
+ // Allow 20% of the threads to fail. This allows the workloads to run on
+ // underpowered test hosts.
+ threadMgr.checkFailed(0.2);
} finally {
- // Until we are guaranteed that the stepdown thread isn't running, it isn't safe for
- // the $config.teardown() function to be called. We should signal to resmoke.py that
- // the stepdown thread should stop running and wait for the stepdown thread to
- // signal that it has stopped.
- //
- // Signal to the stepdown thread to stop stepping down the cluster.
- if (executionOptions.stepdownFiles !== undefined) {
- writeFile(executionOptions.stepdownFiles.idleRequest, '');
-
- // Wait for the acknowledgement file to be created by the stepdown thread.
- assert.soonNoExcept(function() {
- // The cat() function will throw an exception if the file isn't found.
- cat(executionOptions.stepdownFiles.idleAck);
- return true;
- }, "stepdown still in progress");
- }
+ // Threads must be joined before destruction, so do this even in the presence of
+ // exceptions.
+ errors.push(...threadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
} finally {
- if (cluster.shouldPerformContinuousStepdowns()) {
- cluster.reestablishConnectionsAfterFailover();
+ // Until we are guaranteed that the stepdown thread isn't running, it isn't safe for
+ // the $config.teardown() function to be called. We should signal to resmoke.py that
+ // the stepdown thread should stop running and wait for the stepdown thread to
+ // signal that it has stopped.
+ //
+ // Signal to the stepdown thread to stop stepping down the cluster.
+ if (executionOptions.stepdownFiles !== undefined) {
+ writeFile(executionOptions.stepdownFiles.idleRequest, '');
+
+ // Wait for the acknowledgement file to be created by the stepdown thread.
+ assert.soonNoExcept(function() {
+ // The cat() function will throw an exception if the file isn't found.
+ cat(executionOptions.stepdownFiles.idleAck);
+ return true;
+ }, "stepdown still in progress");
}
- // Call each workload's teardown function. After all teardowns have completed check if
- // any of them failed.
- const cleanupResults = cleanup.map(
- workload => cleanupWorkload(workload, context, cluster, errors, 'Foreground'));
- teardownFailed = cleanupResults.some(success => (success === false));
-
- totalTime = Date.now() - startTime;
- jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' + workloads.join(' '));
}
-
- // Throw any existing errors so that resmoke.py can abort its execution of the test suite.
- throwError(errors);
-
- cluster.teardown();
- }
-
- if (typeof db === 'undefined') {
- throw new Error(
- 'resmoke_runner.js must be run with the mongo shell already connected to the database');
- }
-
- const clusterOptions = {
- replication: {enabled: false},
- sharded: {enabled: false},
- };
-
- // The TestData.discoverTopology is false when we only care about connecting to either a
- // standalone or primary node in a replica set.
- if (TestData.discoverTopology !== false) {
- const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
-
- if (topology.type === Topology.kReplicaSet) {
- clusterOptions.replication.enabled = true;
- clusterOptions.replication.numNodes = topology.nodes.length;
- } else if (topology.type === Topology.kShardedCluster) {
- clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
- clusterOptions.sharded.enabled = true;
- clusterOptions.sharded.enableAutoSplit = TestData.hasOwnProperty('runningWithAutoSplit')
- ? TestData.runningWithAutoSplit
- : true;
- clusterOptions.sharded.enableBalancer = TestData.hasOwnProperty('runningWithBalancer')
- ? TestData.runningWithBalancer
- : true;
- clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
- clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
- clusterOptions.sharded.stepdownOptions = {};
- clusterOptions.sharded.stepdownOptions.configStepdown =
- TestData.runningWithConfigStepdowns || false;
- clusterOptions.sharded.stepdownOptions.shardStepdown =
- TestData.runningWithShardStepdowns || false;
- } else if (topology.type !== Topology.kStandalone) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
+ } finally {
+ if (cluster.shouldPerformContinuousStepdowns()) {
+ cluster.reestablishConnectionsAfterFailover();
}
+ // Call each workload's teardown function. After all teardowns have completed check if
+ // any of them failed.
+ const cleanupResults = cleanup.map(
+ workload => cleanupWorkload(workload, context, cluster, errors, 'Foreground'));
+ teardownFailed = cleanupResults.some(success => (success === false));
+
+ totalTime = Date.now() - startTime;
+ jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' + workloads.join(' '));
}
- clusterOptions.sameDB = TestData.sameDB;
- clusterOptions.sameCollection = TestData.sameCollection;
+ // Throw any existing errors so that resmoke.py can abort its execution of the test suite.
+ throwError(errors);
+
+ cluster.teardown();
+}
+
+if (typeof db === 'undefined') {
+ throw new Error(
+ 'resmoke_runner.js must be run with the mongo shell already connected to the database');
+}
+
+const clusterOptions = {
+ replication: {enabled: false},
+ sharded: {enabled: false},
+};
+
+// The TestData.discoverTopology is false when we only care about connecting to either a
+// standalone or primary node in a replica set.
+if (TestData.discoverTopology !== false) {
+ const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
+
+ if (topology.type === Topology.kReplicaSet) {
+ clusterOptions.replication.enabled = true;
+ clusterOptions.replication.numNodes = topology.nodes.length;
+ } else if (topology.type === Topology.kShardedCluster) {
+ clusterOptions.replication.enabled = TestData.usingReplicaSetShards || false;
+ clusterOptions.sharded.enabled = true;
+ clusterOptions.sharded.enableAutoSplit =
+ TestData.hasOwnProperty('runningWithAutoSplit') ? TestData.runningWithAutoSplit : true;
+ clusterOptions.sharded.enableBalancer =
+ TestData.hasOwnProperty('runningWithBalancer') ? TestData.runningWithBalancer : true;
+ clusterOptions.sharded.numMongos = topology.mongos.nodes.length;
+ clusterOptions.sharded.numShards = Object.keys(topology.shards).length;
+ clusterOptions.sharded.stepdownOptions = {};
+ clusterOptions.sharded.stepdownOptions.configStepdown =
+ TestData.runningWithConfigStepdowns || false;
+ clusterOptions.sharded.stepdownOptions.shardStepdown =
+ TestData.runningWithShardStepdowns || false;
+ } else if (topology.type !== Topology.kStandalone) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
+}
- let workloads = TestData.fsmWorkloads;
+clusterOptions.sameDB = TestData.sameDB;
+clusterOptions.sameCollection = TestData.sameCollection;
- let sessionOptions = {};
- if (TestData.runningWithCausalConsistency !== undefined) {
- // Explicit sessions are causally consistent by default, so causal consistency has to be
- // explicitly disabled.
- sessionOptions.causalConsistency = TestData.runningWithCausalConsistency;
+let workloads = TestData.fsmWorkloads;
- if (TestData.runningWithCausalConsistency) {
- sessionOptions.readPreference = {mode: 'secondary'};
- }
- }
+let sessionOptions = {};
+if (TestData.runningWithCausalConsistency !== undefined) {
+ // Explicit sessions are causally consistent by default, so causal consistency has to be
+ // explicitly disabled.
+ sessionOptions.causalConsistency = TestData.runningWithCausalConsistency;
- if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
- sessionOptions.retryWrites = true;
- }
-
- const executionOptions = {dbNamePrefix: TestData.dbNamePrefix || ""};
- const resmokeDbPathPrefix = TestData.resmokeDbPathPrefix || ".";
-
- // The stepdown file names need to match the same construction as found in
- // buildscripts/resmokelib/testing/hooks/stepdown.py.
- if (TestData.useStepdownPermittedFile) {
- executionOptions.stepdownFiles = {
- permitted: resmokeDbPathPrefix + '/permitted',
- idleRequest: resmokeDbPathPrefix + '/idle_request',
- idleAck: resmokeDbPathPrefix + '/idle_ack',
- };
+ if (TestData.runningWithCausalConsistency) {
+ sessionOptions.readPreference = {mode: 'secondary'};
}
+}
+
+if (TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns) {
+ sessionOptions.retryWrites = true;
+}
+
+const executionOptions = {
+ dbNamePrefix: TestData.dbNamePrefix || ""
+};
+const resmokeDbPathPrefix = TestData.resmokeDbPathPrefix || ".";
+
+// The stepdown file names need to match the same construction as found in
+// buildscripts/resmokelib/testing/hooks/stepdown.py.
+if (TestData.useStepdownPermittedFile) {
+ executionOptions.stepdownFiles = {
+ permitted: resmokeDbPathPrefix + '/permitted',
+ idleRequest: resmokeDbPathPrefix + '/idle_request',
+ idleAck: resmokeDbPathPrefix + '/idle_ack',
+ };
+}
- if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) {
- executionOptions.sessionOptions = sessionOptions;
- }
+if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) {
+ executionOptions.sessionOptions = sessionOptions;
+}
- runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
+runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
})();
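For reference, the stepdown coordination threaded through runWorkloads above is a three-file handshake with resmoke.py's stepdown hook. A condensed sketch of just that protocol, using the same shell builtins (writeFile, cat) as the code above; the hook's file-watching behavior on the Python side is assumed from the comments:

    // 1. After every $config.setup() has run, permit stepdowns to begin.
    writeFile(executionOptions.stepdownFiles.permitted, '');

    // ... worker threads run while the hook steps primaries down ...

    // 2. Before any teardown touches the cluster, ask the stepdown thread to go idle.
    writeFile(executionOptions.stepdownFiles.idleRequest, '');

    // 3. Block until the hook acknowledges: cat() throws while the ack file is
    //    missing, so soonNoExcept retries until it appears.
    assert.soonNoExcept(function() {
        cat(executionOptions.stepdownFiles.idleAck);
        return true;
    }, 'stepdown still in progress');
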
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 3568e8cae4a..df01c8afbfd 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -8,15 +8,14 @@ load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and
load('jstests/concurrency/fsm_utils/setup_teardown_functions.js');
var runner = (function() {
-
function validateExecutionMode(mode) {
var allowedKeys = ['composed', 'parallel', 'serial'];
Object.keys(mode).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
mode.composed = mode.composed || false;
@@ -61,8 +60,8 @@ var runner = (function() {
Object.keys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
if (typeof options.subsetSize !== 'undefined') {
@@ -130,8 +129,8 @@ var runner = (function() {
Object.keys(options).forEach(function(option) {
assert.contains(option,
allowedKeys,
- 'invalid option: ' + tojson(option) + '; valid options are: ' +
- tojson(allowedKeys));
+ 'invalid option: ' + tojson(option) +
+ '; valid options are: ' + tojson(allowedKeys));
});
if (typeof options.dropDatabaseBlacklist !== 'undefined') {
@@ -752,7 +751,6 @@ var runner = (function() {
loadWorkloadContext,
}
};
-
})();
var runWorkloadsSerially = runner.serial;
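The allowedKeys/assert.contains idiom that recurs in the hunks above is a simple whitelist over an options object; a standalone sketch of the same pattern (the function name is illustrative):

    function validateOptions(options, allowedKeys) {
        Object.keys(options).forEach(function(option) {
            assert.contains(option,
                            allowedKeys,
                            'invalid option: ' + tojson(option) +
                                '; valid options are: ' + tojson(allowedKeys));
        });
    }

    validateOptions({serial: true}, ['composed', 'parallel', 'serial']);  // passes
    // A misspelled key such as {serail: true} would throw with the message above.
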
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index f5a827794c6..cb172148fe0 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -6,7 +6,6 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
load('jstests/libs/specific_secondary_reader_mongo.js');
var workerThread = (function() {
-
// workloads = list of workload filenames
// args.tid = the thread identifier
// args.data = map of workload -> 'this' parameter passed to the FSM state functions
@@ -193,8 +192,7 @@ var workerThread = (function() {
// them here as non-configurable and non-writable.
Object.defineProperties(data, {
'iterations': {configurable: false, writable: false, value: data.iterations},
- 'threadCount':
- {configurable: false, writable: false, value: data.threadCount}
+ 'threadCount': {configurable: false, writable: false, value: data.threadCount}
});
data.tid = args.tid;
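Pinning 'iterations' and 'threadCount' as above makes later assignments no-ops (or TypeErrors under 'use strict'), so a workload's state functions cannot clobber their own budget. A small sketch of the effect:

    var data = {iterations: 10, threadCount: 4};
    Object.defineProperties(data, {
        'iterations': {configurable: false, writable: false, value: data.iterations},
        'threadCount': {configurable: false, writable: false, value: data.threadCount}
    });

    data.iterations = 999;           // silently ignored in non-strict code
    assert.eq(10, data.iterations);  // the pinned value survives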