author     Jonathan Abrahams <jonathan@mongodb.com>  2015-12-02 10:09:20 -0500
committer  Jonathan Abrahams <jonathan@mongodb.com>  2015-12-02 10:10:18 -0500
commit     dd08d650c2734a86d7bebb54aa84cc38560b1f2a (patch)
tree       f8caee451fe9d1572dc531898db45fe1c9602ffb /jstests/concurrency
parent     8fe6428bf9dfbcb7e2615d47634101b2568262e0 (diff)
download   mongo-dd08d650c2734a86d7bebb54aa84cc38560b1f2a.tar.gz
SERVER-21115 Add dbHash checking to concurrency suite
Diffstat (limited to 'jstests/concurrency')
 jstests/concurrency/fsm_libs/cluster.js | 74
 jstests/concurrency/fsm_libs/runner.js  | 47
 2 files changed, 107 insertions(+), 14 deletions(-)
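For context on what the new check compares: dbHash is a per-database server command, and the fields the patch reads below (md5, collections, numCollections) come straight from its response. A minimal shell sketch, assuming a mongod connection in the mongo shell; the database name and printed values are illustrative only:

    // Fetch the hash summary for the 'test' database.
    var res = db.getSiblingDB('test').runCommand({dbHash: 1});
    assert.commandWorked(res);
    printjson(res.md5);            // aggregate hash over the whole database
    printjson(res.collections);    // per-collection hashes, e.g. {foo: 'd41d8c...'}
    printjson(res.numCollections); // collection count, compared across nodes below
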
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 66f89047075..f7b28942512 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -109,6 +109,7 @@ var Cluster = function(options) {
     };
     var nextConn = 0;
     var primaries = [];
+    var replSets = [];

     // TODO: Define size of replica set from options
     var replSetNodes = 3;
@@ -172,6 +173,7 @@ var Cluster = function(options) {
             while (rsTest) {
                 this._addReplicaSetConns(rsTest);
                 primaries.push(rsTest.getPrimary());
+                replSets.push(rsTest);
                 ++i;
                 rsTest = st['rs' + i];
             }
@@ -200,6 +202,7 @@ var Cluster = function(options) {
         conn = rst.getPrimary();
         primaries = [conn];
+        replSets = [rst];

         this.teardown = function teardown() {
             options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -403,7 +406,11 @@ var Cluster = function(options) {
         st.stopBalancer();
     };

-    this.awaitReplication = function awaitReplication() {
+    this.isBalancerEnabled = function isBalancerEnabled() {
+        return this.isSharded() && options.enableBalancer;
+    };
+
+    this.awaitReplication = function awaitReplication(message) {
         if (this.isReplication()) {
             var wc = {
                 writeConcern: {
@@ -413,19 +420,78 @@ var Cluster = function(options) {
             };
             primaries.forEach(function(primary) {
                 var startTime = Date.now();
-                jsTest.log(primary.host + ': awaitReplication started');
+                jsTest.log(primary.host + ': awaitReplication started ' + message);

                 // Insert a document with a writeConcern for all nodes in the replica set to
                 // ensure that all previous workload operations have completed on secondaries
                 var result = primary.getDB('test').fsm_teardown.insert({ a: 1 }, wc);
                 assert.writeOK(result, 'teardown insert failed: ' + tojson(result));
-                assert(primary.getDB('test').fsm_teardown.drop(), 'teardown drop failed');

                 var totalTime = Date.now() - startTime;
-                jsTest.log(primary.host + ': awaitReplication completed in ' + totalTime + ' ms');
+                jsTest.log(primary.host + ': awaitReplication ' + message + ' completed in ' +
+                           totalTime + ' ms');
             });
         }
     };
+
+    // Returns true if the specified DB contains a capped collection.
+    var containsCappedCollection = function containsCappedCollection(db) {
+        return db.getCollectionNames().some(coll => db[coll].isCapped());
+    };
+
+    // Checks dbHashes for databases that are not on the blacklist.
+    // All replica set nodes are checked.
+    this.checkDbHashes = function checkDbHashes(dbBlacklist, message) {
+        if (!this.isReplication() || this.isBalancerEnabled()) {
+            return;
+        }
+
+        var res = this.getDB('admin').runCommand('listDatabases');
+        assert.commandWorked(res);
+
+        res.databases.forEach(function(dbInfo) {
+            if (Array.contains(dbBlacklist, dbInfo.name)) {
+                return;
+            }
+            var hasCappedColl = containsCappedCollection(this.getDB(dbInfo.name));
+
+            replSets.forEach(function(replSet) {
+                var hashes = replSet.getHashes(dbInfo.name);
+                var masterHashes = hashes.master;
+                assert.commandWorked(masterHashes);
+                var dbHash = masterHashes.md5;
+
+                hashes.slaves.forEach(function(slaveHashes) {
+                    assert.commandWorked(slaveHashes);
+                    assert.eq(masterHashes.numCollections,
+                              slaveHashes.numCollections,
+                              message + ' dbHash number of collections in db ' +
+                                  dbInfo.name + ' ' + tojson(hashes));
+
+                    if (!hasCappedColl) {
+                        // dbHash on a DB not containing a capped collection should match.
+                        assert.eq(dbHash,
+                                  slaveHashes.md5,
+                                  message + ' dbHash inconsistency for db ' +
+                                      dbInfo.name + ' ' + tojson(hashes));
+                    } else {
+                        // dbHash on a DB containing a capped collection will not return
+                        // consistent results between the replica set, so we only
+                        // check non-capped collections in the DB.
+                        var collNames = Object.keys(masterHashes.collections).filter(
+                            coll =>
+                                !this.getDB(dbInfo.name)[coll].isCapped());
+                        collNames.forEach(function(coll) {
+                            assert.eq(masterHashes.collections[coll],
+                                      slaveHashes.collections[coll],
+                                      message + ' dbHash inconsistency for collection ' + coll +
+                                          ' in db ' + dbInfo.name + ' ' + tojson(hashes));
+                        }, this);
+                    }
+                }, this);
+            }, this);
+        }, this);
+    };
 };

 /**
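The checkDbHashes() logic above leans on ReplSetTest.getHashes(), which runs dbHash on the primary and every secondary and returns the results as {master, slaves}. A rough sketch of the shape being consumed, with hypothetical collection names and hash values; the whole-DB md5 comparison is only meaningful when no capped collection is present, since capped collections can truncate independently on each node:

    // Shape consumed by checkDbHashes(); all values illustrative.
    var hashes = replSet.getHashes('test');
    // hashes.master -> {md5: '8a4d...', numCollections: 2,
    //                   collections: {foo: 'ab12...', cappedLog: '77fe...'}, ok: 1}
    // hashes.slaves -> one response object per secondary, same shape
    hashes.slaves.forEach(function(slaveHashes) {
        // With a capped collection in the DB, fall back to comparing
        // only the non-capped entries, one collection at a time.
        assert.eq(hashes.master.collections['foo'], slaveHashes.collections['foo']);
    });
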
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 7e152138ade..696794f8efa 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -422,9 +422,23 @@ var runner = (function() {
         jsTest.log('End of schedule');
     }

-    function cleanupWorkload(workload, context, cluster, errors, kind) {
+    function cleanupWorkload(workload, context, cluster, errors, kind, dbHashBlacklist) {
         // Returns true if the workload's teardown succeeds and false if the workload's
         // teardown fails.
+
+        try {
+            // Ensure that secondaries have caught up before workload teardown.
+            cluster.awaitReplication('before workload teardown');
+
+            // Check dbHash, for all DBs not in dbHashBlacklist, on all nodes
+            // before the workload's teardown method is called.
+            cluster.checkDbHashes(dbHashBlacklist, 'before workload teardown');
+        } catch (e) {
+            errors.push(new WorkloadFailure(e.toString(), e.stack,
+                                            kind + ' checking consistency on secondaries'));
+            return false;
+        }
+
         try {
             teardownWorkload(workload, context, cluster);
         } catch (e) {
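The ordering of the two new calls matters: the awaitReplication() barrier has to complete before hashes are compared, otherwise a secondary that is merely lagging would report a spurious mismatch. A condensed view of the gate this hunk adds, with the error bookkeeping elided (see cleanupWorkload() above for the full version):

    try {
        cluster.awaitReplication('before workload teardown');               // 1. barrier write
        cluster.checkDbHashes(dbHashBlacklist, 'before workload teardown'); // 2. compare hashes
    } catch (e) {
        return false; // recorded in errors; throwError(errors) aborts the schedule later
    }
    teardownWorkload(workload, context, cluster);                           // 3. teardown
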
@@ -434,8 +448,9 @@ var runner = (function() {
         return true;
     }

-    function runWorkloadGroup(threadMgr, workloads, context, cluster, clusterOptions, executionMode,
-                              executionOptions, errors, maxAllowedThreads){
+    function runWorkloadGroup(threadMgr, workloads, context, cluster, clusterOptions,
+                              executionMode, executionOptions, errors, maxAllowedThreads,
+                              dbHashBlacklist) {
         var cleanup = [];
         var teardownFailed = false;
         var startTime = Date.now(); // Initialize in case setupWorkload fails below.
@@ -479,7 +494,8 @@ var runner = (function() {
         // Call each foreground workload's teardown function. After all teardowns have completed
         // check if any of them failed.
         var cleanupResults = cleanup.map(workload =>
-            cleanupWorkload(workload, context, cluster, errors, 'Foreground'));
+            cleanupWorkload(workload, context, cluster, errors,
+                            'Foreground', dbHashBlacklist));
         teardownFailed = cleanupResults.some(success => (success === false));

         totalTime = Date.now() - startTime;
@@ -495,8 +511,12 @@ var runner = (function() {
         // Throw any existing errors so that the schedule aborts.
         throwError(errors);

-        // Ensure that secondaries have caught up for workload teardown (SERVER-18878).
-        cluster.awaitReplication();
+        // Ensure that secondaries have caught up after workload teardown.
+        cluster.awaitReplication('after workload-group teardown and data clean-up');
+
+        // Check dbHash, for all DBs not in dbHashBlacklist, on all nodes
+        // after the workload's teardown method is called.
+        cluster.checkDbHashes(dbHashBlacklist, 'after workload-group teardown and data clean-up');
     }

     function runWorkloads(workloads,
@@ -555,8 +575,13 @@ var runner = (function() {
         // List of DBs that will not be dropped.
         var dbBlacklist = ['admin', 'config', 'local', '$external'];
+
+        // List of DBs that dbHash is not run on.
+        var dbHashBlacklist = ['local'];
+
         if (cleanupOptions.dropDatabaseBlacklist) {
-            dbBlacklist = dbBlacklist.concat(cleanupOptions.dropDatabaseBlacklist);
+            dbBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
+            dbHashBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
         }

         if (!cleanupOptions.keepExistingDatabases) {
             dropAllDatabases(cluster.getDB('test'), dbBlacklist);
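The two blacklists intentionally diverge: 'admin', 'config', and '$external' must never be dropped but can still be hashed, while 'local' is node-private and unreplicated, so its hashes would never agree across a replica set. Assuming a hypothetical cleanupOptions.dropDatabaseBlacklist of ['important_db'], the code above yields:

    dbBlacklist;     // ['admin', 'config', 'local', '$external', 'important_db'] -- never dropped
    dbHashBlacklist; // ['local', 'important_db']                                 -- never hashed
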
@@ -612,8 +637,9 @@ var runner = (function() {
             });

             // Run the next group of workloads in the schedule.
-            runWorkloadGroup(threadMgr, workloads, groupContext, cluster, clusterOptions,
-                             executionMode, executionOptions, errors, maxAllowedThreads);
+            runWorkloadGroup(threadMgr, workloads, groupContext, cluster,
+                             clusterOptions, executionMode, executionOptions,
+                             errors, maxAllowedThreads, dbHashBlacklist);
         });
     } finally {
         // Set a flag so background threads know to terminate.
@@ -625,7 +651,8 @@ var runner = (function() {
         try {
             // Call each background workload's teardown function.
             bgCleanup.forEach(bgWorkload => cleanupWorkload(bgWorkload, bgContext, cluster,
-                                                            errors, 'Background'));
+                                                            errors, 'Background',
+                                                            dbHashBlacklist));

             // TODO: Call cleanupWorkloadData() on background workloads here if no background
             // workload teardown functions fail.