author    Robert Guo <robert.guo@10gen.com>  2018-06-05 17:11:24 -0400
committer Robert Guo <robert.guo@10gen.com>  2018-06-08 09:45:35 -0400
commit    fe841d1b78f4d03bd8fbc534a2b9ea42abb02190 (patch)
tree      2c8f50a051258693e28a71040b84b50abe70182b /jstests/concurrency/fsm_libs
parent    e361973f0e994d7c5da603cb6436fd96f7180127 (diff)
download  mongo-fe841d1b78f4d03bd8fbc534a2b9ea42abb02190.tar.gz
SERVER-35389 remove dead code from FSM suite
Diffstat (limited to 'jstests/concurrency/fsm_libs')
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js        | 123
-rw-r--r--  jstests/concurrency/fsm_libs/errors.js         |  15
-rw-r--r--  jstests/concurrency/fsm_libs/resmoke_runner.js |   1
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js         | 179
-rw-r--r--  jstests/concurrency/fsm_libs/thread_mgr.js     |  15
5 files changed, 54 insertions(+), 279 deletions(-)
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 73de632ef7f..330507a448d 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -48,7 +48,6 @@ var Cluster = function(options) {
'sharded.stepdownOptions.configStepdown',
'sharded.stepdownOptions.shardStepdown',
'teardownFunctions',
- 'useExistingConnectionAsSeed',
];
getObjectKeys(options).forEach(function(option) {
@@ -167,30 +166,12 @@ var Cluster = function(options) {
'Expected teardownFunctions.config to be an array');
assert(options.teardownFunctions.config.every(f => (typeof f === 'function')),
'Expected teardownFunctions.config to be an array of functions');
-
- options.useExistingConnectionAsSeed = options.useExistingConnectionAsSeed || false;
- assert.eq('boolean', typeof options.useExistingConnectionAsSeed);
- }
-
- function makeReplSetTestConfig(numReplSetNodes, firstNodeOnlyVote) {
- const REPL_SET_VOTING_LIMIT = 7;
- // Workaround for SERVER-26893 to specify when numReplSetNodes > REPL_SET_VOTING_LIMIT.
- var firstNodeNotVoting = firstNodeOnlyVote ? 1 : REPL_SET_VOTING_LIMIT;
- var rstConfig = [];
- for (var i = 0; i < numReplSetNodes; i++) {
- rstConfig[i] = {};
- if (i >= firstNodeNotVoting) {
- rstConfig[i].rsConfig = {priority: 0, votes: 0};
- }
- }
- return rstConfig;
}
var conn;
var secondaryConns;
var st;
- var rawST; // The raw ShardingTest object for test suites not using resmoke fixtures.
var initialized = false;
var clusterStartTime;
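The removed makeReplSetTestConfig() helper worked around SERVER-26893 by capping voting members at MongoDB's replica-set limit of seven. A minimal sketch of what it returned, assuming a nine-node set where every node may vote (firstNodeOnlyVote === false):

    var cfg = makeReplSetTestConfig(9, false);
    // cfg[0] through cfg[6] are {}                               (voting members)
    // cfg[7] and cfg[8] are {rsConfig: {priority: 0, votes: 0}}  (non-voting)
    // With firstNodeOnlyVote === true, only cfg[0] would keep its vote.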
@@ -211,86 +192,25 @@ var Cluster = function(options) {
}
if (options.sharded.enabled) {
- if (options.useExistingConnectionAsSeed) {
- st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
- } else {
- // TODO: allow 'options' to specify the number of shards and mongos processes
- var shardConfig = {
- shards: options.sharded.numShards,
- mongos: options.sharded.numMongos,
- verbose: verbosityLevel,
- other: {
- enableAutoSplit: options.sharded.enableAutoSplit,
- enableBalancer: options.sharded.enableBalancer,
- }
- };
-
- // TODO: allow 'options' to specify an 'rs' config
- if (options.replication.enabled) {
- shardConfig.rs = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover
- // during write-heavy workloads
- oplogSize: 1024,
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000},
- verbose: verbosityLevel
- };
- shardConfig.rsOptions = {};
- }
-
- if (this.shouldPerformContinuousStepdowns()) {
- load('jstests/libs/override_methods/continuous_stepdown.js');
- ContinuousStepdown.configure(options.sharded.stepdownOptions);
- }
-
- rawST = new ShardingTest(shardConfig);
- const hostStr = "mongodb://" + rawST._mongos.map(conn => conn.host).join(",");
-
- st = new FSMShardingTest(hostStr);
- }
+ st = new FSMShardingTest(`mongodb://${db.getMongo().host}`);
conn = st.s(0); // First mongos
- this.teardown = function teardown(opts) {
+ this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
options.teardownFunctions.mongos.forEach(this.executeOnMongosNodes);
options.teardownFunctions.config.forEach(this.executeOnConfigNodes);
+ };
- // Skip checking uuids in teardown if performing continuous stepdowns. The override
- // uses cached connections and expects to run commands against primaries, which is
- // not compatible with stepdowns.
- if (this.shouldPerformContinuousStepdowns()) {
- TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
- }
-
- if (!options.useExistingConnectionAsSeed) {
- rawST.stop(opts);
+ this.reestablishConnectionsAfterFailover = function() {
+ // Call getPrimary() to re-establish the connections in FSMShardingTest
+ // as it is not a transparent proxy for ShardingTest.
+ st._configsvr.getPrimary();
+ for (let rst of st._shard_rsts) {
+ rst.getPrimary();
}
};
- if (this.shouldPerformContinuousStepdowns()) {
- this.startContinuousFailover = function() {
- rawST.startContinuousFailover();
- };
-
- this.reestablishConnectionsAfterFailover = function() {
- // Call getPrimary() to re-establish the connections in FSMShardingTest
- // as it is not a transparent proxy for ShardingTest/rawST.
- st._configsvr.getPrimary();
- for (let rst of st._shard_rsts) {
- rst.getPrimary();
- }
- };
-
- this.stopContinuousFailover = function() {
- rawST.stopContinuousFailover(
- {waitForPrimary: true, waitForMongosRetarget: true});
- this.reestablishConnectionsAfterFailover();
- };
- }
-
// Save all mongos, mongod, and ReplSet connections (if any).
var i;
@@ -312,35 +232,14 @@ var Cluster = function(options) {
}
} else if (options.replication.enabled) {
- var replSetConfig = {
- nodes: makeReplSetTestConfig(options.replication.numNodes,
- !this.shouldPerformContinuousStepdowns()),
- // Increase the oplog size (in MB) to prevent rollover during write-heavy workloads
- oplogSize: 1024,
- nodeOptions: {verbose: verbosityLevel},
- // Set the electionTimeoutMillis to 1 day to prevent unintended elections
- settings: {electionTimeoutMillis: 60 * 60 * 24 * 1000}
- };
-
- if (!options.useExistingConnectionAsSeed) {
- rst = new ReplSetTest(replSetConfig);
- rst.startSet();
-
- rst.initiate();
- rst.awaitSecondaryNodes();
- } else {
- rst = new ReplSetTest(db.getMongo().host);
- }
+ rst = new ReplSetTest(db.getMongo().host);
conn = rst.getPrimary();
secondaryConns = rst.getSecondaries();
replSets = [rst];
- this.teardown = function teardown(opts) {
+ this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
- if (!options.useExistingConnectionAsSeed) {
- rst.stopSet(undefined, undefined, opts);
- }
};
this._addReplicaSetConns(rst);
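After this change, cluster.js never starts its own ShardingTest or ReplSetTest; it always seeds from the shell's existing connection, as the replication branch above now does. A minimal sketch of that pattern (the host value is illustrative):

    // Seed a ReplSetTest handle from the already-connected shell.
    var rst = new ReplSetTest(db.getMongo().host);  // e.g. "localhost:20010"
    var conn = rst.getPrimary();
    var secondaryConns = rst.getSecondaries();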
diff --git a/jstests/concurrency/fsm_libs/errors.js b/jstests/concurrency/fsm_libs/errors.js
deleted file mode 100644
index 1b7706962e1..00000000000
--- a/jstests/concurrency/fsm_libs/errors.js
+++ /dev/null
@@ -1,15 +0,0 @@
-'use strict';
-
-/**
- * errors.js
- *
- * This file defines custom errors.
- */
-
-function IterationEnd(message) {
- this.name = 'IterationEnd';
- this.message = message || 'Iteration instructed to terminate';
- this.stack = (new Error()).stack;
-}
-
-IterationEnd.prototype = Object.create(Error.prototype);
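The deleted errors.js used the classic prototypal pattern for custom errors. A sketch of how such an error was thrown and recognized; the matching startsWith() filter appears in the runner.js hunks below:

    try {
        throw new IterationEnd('Background workload failed');
    } catch (e) {
        // Downstream, runner.js filtered these via e.err.startsWith('IterationEnd:').
        print(e.name + ': ' + e.message);
    }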
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index 1101bf4cd9f..cbf24e694ed 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -199,7 +199,6 @@
const clusterOptions = {
replication: {enabled: false},
sharded: {enabled: false},
- useExistingConnectionAsSeed: true,
};
const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
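For context, DiscoverTopology.findConnectedNodes() (from jstests/libs/discover_topology.js) reports what the shell is attached to, and the runner flips clusterOptions accordingly. A hedged sketch; the Topology constants are assumptions based on that library, not part of this diff:

    if (topology.type === Topology.kReplicaSet) {
        clusterOptions.replication.enabled = true;
    } else if (topology.type === Topology.kShardedCluster) {
        clusterOptions.sharded.enabled = true;
    }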
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 9a729ba6008..c40d63f556b 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -2,7 +2,6 @@
load('jstests/concurrency/fsm_libs/assert.js');
load('jstests/concurrency/fsm_libs/cluster.js');
-load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
load('jstests/concurrency/fsm_libs/parse_config.js');
load('jstests/concurrency/fsm_libs/thread_mgr.js');
load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and uniqueDBName
@@ -43,7 +42,6 @@ var runner = (function() {
function validateExecutionOptions(mode, options) {
var allowedKeys = [
- 'backgroundWorkloads',
'dbNamePrefix',
'iterationMultiplier',
'sessionOptions',
@@ -91,10 +89,6 @@ var runner = (function() {
assert.lte(options.composeProb, 1);
}
- options.backgroundWorkloads = options.backgroundWorkloads || [];
- assert(Array.isArray(options.backgroundWorkloads),
- 'expected backgroundWorkloads to be an array');
-
if (typeof options.dbNamePrefix !== 'undefined') {
assert.eq(
'string', typeof options.dbNamePrefix, 'expected dbNamePrefix to be a string');
@@ -444,7 +438,7 @@ var runner = (function() {
});
}
- function printWorkloadSchedule(schedule, backgroundWorkloads) {
+ function printWorkloadSchedule(schedule) {
// Print out the entire schedule of workloads to make it easier to run the same
// schedule when debugging test failures.
jsTest.log('The entire schedule of FSM workloads:');
@@ -452,10 +446,6 @@ var runner = (function() {
// Note: We use printjsononeline (instead of just plain printjson) to make it
// easier to reuse the output in variable assignments.
printjsononeline(schedule);
- if (backgroundWorkloads.length > 0) {
- jsTest.log('Background Workloads:');
- printjsononeline(backgroundWorkloads);
- }
jsTest.log('End of schedule');
}
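printjsononeline() emits the schedule as a single JSON line precisely so it can be pasted back into a variable assignment when reproducing a failed run. A hypothetical example of reusing such output (workload names invented for illustration):

    // Re-run the exact schedule captured from a failing task's log.
    var schedule = [["workload_a.js", "workload_b.js"], ["workload_c.js"]];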
@@ -583,30 +573,18 @@ var runner = (function() {
tojson(session.getOperationTime());
}
- if (cluster.shouldPerformContinuousStepdowns()) {
- cluster.startContinuousFailover();
- }
-
try {
- try {
- // Start this set of foreground workload threads.
- threadMgr.spawnAll(cluster, executionOptions);
- // Allow 20% of foreground threads to fail. This allows the workloads to run on
- // underpowered test hosts.
- threadMgr.checkFailed(0.2);
- } finally {
- // Threads must be joined before destruction, so do this
- // even in the presence of exceptions.
- errors.push(...threadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
- }
+ // Start this set of foreground workload threads.
+ threadMgr.spawnAll(cluster, executionOptions);
+ // Allow 20% of foreground threads to fail. This allows the workloads to run on
+ // underpowered test hosts.
+ threadMgr.checkFailed(0.2);
} finally {
- if (cluster.shouldPerformContinuousStepdowns()) {
- // Suspend the stepdown threads prior to calling cleanupWorkload() to avoid
- // causing a failover to happen while the data consistency checks are running.
- cluster.stopContinuousFailover();
- }
+ // Threads must be joined before destruction, so do this
+ // even in the presence of exceptions.
+ errors.push(...threadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
} finally {
// Call each foreground workload's teardown function. After all teardowns have completed
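The flattened try/finally above preserves one invariant: worker threads are always joined, even when spawnAll() or checkFailed() throws. Reduced to its skeleton, with the names used in the hunk:

    try {
        threadMgr.spawnAll(cluster, executionOptions);
        threadMgr.checkFailed(0.2);  // tolerate up to 20% failed threads
    } finally {
        // joinAll() reaps every spawned thread and returns their errors.
        errors.push(...threadMgr.joinAll().map(
            e => new WorkloadFailure(e.err, e.stack, e.tid,
                                     'Foreground ' + e.workloads.join(' '))));
    }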
@@ -676,11 +654,6 @@ var runner = (function() {
loadWorkloadContext(workloads, context, executionOptions, true /* applyMultipliers */);
var threadMgr = new ThreadManager(clusterOptions, executionMode);
- var bgContext = {};
- var bgWorkloads = executionOptions.backgroundWorkloads;
- loadWorkloadContext(bgWorkloads, bgContext, executionOptions, false /* applyMultipliers */);
- var bgThreadMgr = new ThreadManager(clusterOptions);
-
var cluster = new Cluster(clusterOptions);
if (cluster.isSharded()) {
useDropDistLockFailPoint(cluster, clusterOptions);
@@ -688,8 +661,6 @@ var runner = (function() {
cluster.setup();
// Filter out workloads that need to be skipped.
- bgWorkloads =
- bgWorkloads.filter(workload => !shouldSkipWorkload(workload, bgContext, cluster));
workloads = workloads.filter(workload => !shouldSkipWorkload(workload, context, cluster));
// Clean up the state left behind by other tests in the concurrency suite
@@ -711,112 +682,46 @@ var runner = (function() {
var maxAllowedThreads = 100 * executionOptions.threadMultiplier;
Random.setRandomSeed(clusterOptions.seed);
- var bgCleanup = [];
var errors = [];
var configServerData = [];
- let activeException = false;
try {
- prepareCollections(bgWorkloads, bgContext, cluster, clusterOptions, executionOptions);
-
- // Set up the background thread manager for background workloads.
- bgThreadMgr.init(bgWorkloads, bgContext, maxAllowedThreads);
-
- // Call each background workload's setup function.
- bgWorkloads.forEach(function(bgWorkload) {
- // Define "iterations" and "threadCount" properties on the background workload's
- // $config.data object so that they can be used within its setup(), teardown(), and
- // state functions. This must happen after calling bgThreadMgr.init() in case the
- // thread counts needed to be scaled down.
- setIterations(bgContext[bgWorkload].config);
- setThreadCount(bgContext[bgWorkload].config);
+ var schedule = scheduleWorkloads(workloads, executionMode, executionOptions);
+ printWorkloadSchedule(schedule);
+
+ schedule.forEach(function(workloads) {
+ // Make a deep copy of the $config object for each of the workloads that are
+ // going to be run to ensure the workload starts with a fresh version of its
+ // $config.data. This is necessary because $config.data keeps track of
+ // thread-local state that may be updated during a workload's setup(),
+ // teardown(), and state functions.
+ var groupContext = {};
+ workloads.forEach(function(workload) {
+ groupContext[workload] = Object.extend({}, context[workload], true);
+ });
- setupWorkload(bgWorkload, bgContext, cluster);
- bgCleanup.push(bgWorkload);
+ // Run the next group of workloads in the schedule.
+ runWorkloadGroup(threadMgr,
+ workloads,
+ groupContext,
+ cluster,
+ clusterOptions,
+ executionMode,
+ executionOptions,
+ errors,
+ maxAllowedThreads,
+ dbHashBlacklist,
+ configServerData,
+ cleanupOptions);
});
- try {
- // Start background workload threads.
- bgThreadMgr.spawnAll(cluster, executionOptions);
- bgThreadMgr.checkFailed(0);
-
- var schedule = scheduleWorkloads(workloads, executionMode, executionOptions);
- printWorkloadSchedule(schedule, bgWorkloads);
-
- schedule.forEach(function(workloads) {
- // Check if any background workloads have failed.
- if (bgThreadMgr.checkForErrors()) {
- var msg = 'Background workload failed before all foreground workloads ran';
- throw new IterationEnd(msg);
- }
-
- // Make a deep copy of the $config object for each of the workloads that are
- // going to be run to ensure the workload starts with a fresh version of its
- // $config.data. This is necessary because $config.data keeps track of
- // thread-local state that may be updated during a workload's setup(),
- // teardown(), and state functions.
- var groupContext = {};
- workloads.forEach(function(workload) {
- groupContext[workload] = Object.extend({}, context[workload], true);
- });
-
- // Run the next group of workloads in the schedule.
- runWorkloadGroup(threadMgr,
- workloads,
- groupContext,
- cluster,
- clusterOptions,
- executionMode,
- executionOptions,
- errors,
- maxAllowedThreads,
- dbHashBlacklist,
- configServerData,
- cleanupOptions);
- });
- } catch (err) {
- activeException = true;
- throw err;
- } finally {
- // Set a flag so background threads know to terminate.
- bgThreadMgr.markAllForTermination();
- errors.push(...bgThreadMgr.joinAll().map(
- e => new WorkloadFailure(
- e.err, e.stack, e.tid, 'Background ' + e.workloads.join(' '))));
+ if (cluster.isSharded() && errors.length) {
+ jsTest.log('Config Server Data:\n' + tojsononeline(configServerData));
}
- } finally {
- try {
- // Call each background workload's teardown function.
- bgCleanup.forEach(bgWorkload => cleanupWorkload(bgWorkload,
- bgContext,
- cluster,
- errors,
- 'Background',
- dbHashBlacklist,
- cleanupOptions));
- // TODO: Call cleanupWorkloadData() on background workloads here if no background
- // workload teardown functions fail.
-
- // Replace the active exception with an exception describing the errors from all
- // the foreground and background workloads. IterationEnd errors are ignored because
- // they are thrown when the background workloads are instructed by the thread
- // manager to terminate.
- var workloadErrors = errors.filter(e => !e.err.startsWith('IterationEnd:'));
-
- if (cluster.isSharded() && workloadErrors.length) {
- jsTest.log('Config Server Data:\n' + tojsononeline(configServerData));
- }
- throwError(workloadErrors);
- } catch (err) {
- activeException = true;
- throw err;
- } finally {
- // We preserve the data files when an FSM workload failed so that they can later be
- // archived to S3.
- const opts = activeException ? {noCleanData: true} : {};
- cluster.teardown(opts);
- }
+ throwError(errors);
+ } finally {
+ cluster.teardown();
}
}
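The schedule loop above relies on Object.extend() (a mongo shell helper) with its third argument set to true, so every workload group starts from a fresh deep copy of $config. A small sketch of why the deep flag matters:

    var context = {data: {counter: 0, nested: {docs: []}}};
    var groupCopy = Object.extend({}, context, true /* deep copy */);
    groupCopy.data.counter = 42;
    assert.eq(0, context.data.counter);  // original state is untouched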
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index bfe4bea5f03..a6e361f1103 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -53,7 +53,7 @@ var ThreadManager = function(clusterOptions, executionMode = {composed: false})
'the maximum allowed threads must be an integer');
function computeNumThreads() {
- // If we don't have any workloads, such as having no background workloads, return 0.
+ // If we don't have any workloads, return 0.
if (workloads.length === 0) {
return 0;
}
@@ -191,19 +191,6 @@ var ThreadManager = function(clusterOptions, executionMode = {composed: false})
return errors;
};
-
- this.markAllForTermination = function markAllForTermination() {
- if (_workloads.length === 0) {
- return;
- }
-
- // Background threads periodically check the 'fsm_background' collection of the
- // 'config' database for a document specifying { terminate: true }. If such a
- // document is found the background thread terminates.
- var coll = _context[_workloads[0]].db.getSiblingDB('config').fsm_background;
- assert.writeOK(coll.update({terminate: true}, {terminate: true}, {upsert: true}));
-
- };
};
/**
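The removed markAllForTermination() upserted {terminate: true} into the config database's fsm_background collection. Its comment implies the consumer side looked roughly like the loop below; this is a reconstruction from that comment, not code from this diff:

    // Hypothetical background-thread loop, per the removed comment.
    var coll = db.getSiblingDB('config').fsm_background;
    while (coll.findOne({terminate: true}) === null) {
        // ... run one iteration of the background workload ...
    }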