author     Max Hirschhorn <max.hirschhorn@mongodb.com>   2018-03-25 01:22:47 -0400
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>   2018-03-25 01:22:47 -0400
commit     30e5511cd29b2d5f19b7746d6b6a9d1b32724002 (patch)
tree       e514856b32416e0c3c752190c2ea181cd87075ab
parent     92e92c023843c2f73ce752284a14fa1e3ca92933 (diff)
download   mongo-30e5511cd29b2d5f19b7746d6b6a9d1b32724002.tar.gz
SERVER-33068 Fix run_check_repl_dbhash.js hook to actually run dbhash.
Consolidates the logic in the run_check_repl_dbhash.js and run_validate_collections.js hooks for discovering all of the mongod processes in a MongoDB deployment into a new discover_topology.js library.

Also adds a test that relies on mongod logging to verify that the run_check_repl_dbhash.js and run_validate_collections.js hooks execute on all of the expected servers.

(cherry picked from commit 64bed8173387fbafcfcf39bfb9aa6cecadf25822)
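For orientation, here is a minimal sketch (not part of this commit; it only uses the API the new library exposes, and any host strings it prints would come from the running deployment) of how a hook consumes discover_topology.js:

    load('jstests/libs/discover_topology.js');  // For Topology and DiscoverTopology.

    // 'db' is provided by resmoke.py when it runs a hook inside the mongo shell.
    const topology = DiscoverTopology.findConnectedNodes(db.getMongo());

    if (topology.type === Topology.kStandalone) {
        print('connected to a stand-alone mongod: ' + topology.mongod);
    } else if (topology.type === Topology.kReplicaSet) {
        // topology.nodes is an array of "host:port" strings for the data-bearing members.
        print('replica set members: ' + tojson(topology.nodes));
    } else if (topology.type === Topology.kShardedCluster) {
        print('CSRS members: ' + tojson(topology.configsvr.nodes));
    }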
-rw-r--r--  jstests/hooks/run_check_repl_dbhash.js           | 341
-rw-r--r--  jstests/hooks/run_validate_collections.js        |  97
-rw-r--r--  jstests/libs/discover_topology.js                | 102
-rw-r--r--  jstests/noPassthrough/data_consistency_checks.js | 187
4 files changed, 483 insertions(+), 244 deletions(-)
diff --git a/jstests/hooks/run_check_repl_dbhash.js b/jstests/hooks/run_check_repl_dbhash.js
index a17e407be0e..9f8f98ab36a 100644
--- a/jstests/hooks/run_check_repl_dbhash.js
+++ b/jstests/hooks/run_check_repl_dbhash.js
@@ -3,197 +3,190 @@
'use strict';
(function() {
- load('jstests/libs/parallelTester.js');
+ load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+ load('jstests/libs/parallelTester.js'); // For ScopedThread.
+
+ // A thin wrapper around master/slave nodes that provides methods necessary for checking data
+ // consistency between the master and slave nodes.
+ //
+ // DEPRECATED: This wrapper is only intended to be used for the master-slave deployment started
+ // by resmoke.py as part of the master_slave_jscore_passthrough.yml test suite and it shouldn't
+ // be used for any other master/slave tests.
+ function MasterSlaveDBHashTest(primaryHost) {
+ const master = new Mongo(primaryHost);
+ const masterPort = master.host.split(':')[1];
+ const slave = new Mongo('localhost:' + String(parseInt(masterPort) + 1));
+
+ this.nodeList = function nodeList() {
+ return [master.host, slave.host];
+ };
+
+ this.getHashes = function getHashes(db) {
+ const combinedRes = {};
+ let res = master.getDB(db).runCommand('dbhash');
+ assert.commandWorked(res);
+ combinedRes.master = res;
+
+ res = slave.getDB(db).runCommand('dbhash');
+ assert.commandWorked(res);
+ combinedRes.slaves = [res];
+
+ return combinedRes;
+ };
+
+ this.getPrimary = function getPrimary() {
+ slave.setSlaveOk();
+ this.liveNodes = {master: master, slaves: [slave]};
+ return master;
+ };
+
+ this.getSecondaries = function getSecondaries() {
+ return [slave];
+ };
+
+ this.awaitReplication = function awaitReplication() {
+ assert.commandWorked(master.adminCommand({fsyncUnlock: 1}),
+ 'failed to unlock the primary');
+
+ print('Starting fsync on master to flush all pending writes');
+ assert.commandWorked(master.adminCommand({fsync: 1}));
+ print('fsync on master completed');
+
+ const kTimeout = 5 * 60 * 1000; // 5 minute timeout
+ const dbNames = master.getDBNames();
+
+ for (let dbName of dbNames) {
+ if (dbName === 'local') {
+ continue;
+ }
- function isMasterSlave(uri) {
- const mongo = new Mongo(uri);
- jsTest.authenticate(mongo);
- const cmdLineOpts = mongo.getDB('admin').adminCommand('getCmdLineOpts');
- assert.commandWorked(cmdLineOpts);
- return cmdLineOpts.parsed.master === true;
+ print('Awaiting replication of inserts into ' + dbName);
+ assert.writeOK(master.getDB(dbName).await_repl.insert(
+ {awaiting: 'repl'}, {writeConcern: {w: 2, wtimeout: kTimeout}}),
+ 'Awaiting replication failed');
+ }
+ print('Finished awaiting replication');
+ assert.commandWorked(master.adminCommand({fsync: 1, lock: 1}),
+ 'failed to re-lock the primary');
+ };
+
+ this.checkReplicatedDataHashes = function checkReplicatedDataHashes() {
+ const msgPrefix = 'checkReplicatedDataHashes for master-slave deployment';
+
+ new ReplSetTest({nodes: 0}).checkReplicatedDataHashes.call(this, msgPrefix);
+ };
+
+ this.checkReplicaSet = function checkReplicaSet() {
+ new ReplSetTest({nodes: 0}).checkReplicaSet.apply(this, arguments);
+ };
+
+ this.dumpOplog = function dumpOplog() {
+ print('Not dumping oplog for master-slave deployment');
+ };
}
- function isMultiNodeReplSet(uri) {
- const mongo = new Mongo(uri);
- let hosts = [];
- const isMaster = mongo.adminCommand({isMaster: 1});
- if (isMaster.hasOwnProperty('setName')) {
- let hosts = isMaster.hosts;
- if (isMaster.hasOwnProperty('passives')) {
- hosts = hosts.concat(isMaster.passives);
- }
- }
- return hosts.length > 1;
+ function isMasterSlaveDeployment(conn) {
+ const cmdLineOpts = assert.commandWorked(conn.adminCommand({getCmdLineOpts: 1}));
+ return cmdLineOpts.parsed.master === true;
}
- // Adds the uri and description (replset or master-slave) if server needs dbhash check.
- function checkAndAddServerDesc(uri, out) {
- // No need to check the dbhash of single node replsets.
- if (isMultiNodeReplSet(uri)) {
- out.push({type: 'replset', uri: uri});
- } else if (isMasterSlave(uri)) {
- out.push({type: 'master-slave', uri: uri});
+ function checkReplicatedDataHashesThread(hosts, testData) {
+ try {
+ TestData = testData;
+ new ReplSetTest(hosts[0]).checkReplicatedDataHashes();
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
}
}
- function checkReplDataHashThread(serverDesc, testData, excludedDBs) {
- // A thin wrapper around master/slave nodes that provides the getHashes(), getPrimary(),
- // awaitReplication(), and nodeList() methods.
- // DEPRECATED: this wrapper only supports nodes started through resmoke's masterslave.py
- // fixture. Please do not use it with other master/slave clusters.
- function MasterSlaveDBHashTest(primaryHost) {
- const master = new Mongo(primaryHost);
- const masterPort = master.host.split(':')[1];
- const slave = new Mongo('localhost:' + String(parseInt(masterPort) + 1));
-
- this.nodeList = function() {
- return [master.host, slave.host];
- };
-
- this.getHashes = function(db) {
- const combinedRes = {};
- let res = master.getDB(db).runCommand('dbhash');
- assert.commandWorked(res);
- combinedRes.master = res;
-
- res = slave.getDB(db).runCommand('dbhash');
- assert.commandWorked(res);
- combinedRes.slaves = [res];
-
- return combinedRes;
- };
-
- this.getPrimary = function() {
- slave.setSlaveOk();
- this.liveNodes = {master: master, slaves: [slave]};
- return master;
- };
-
- this.getSecondaries = function() {
- return [slave];
- };
-
- this.awaitReplication = function() {
- assert.commandWorked(master.adminCommand({fsyncUnlock: 1}),
- 'failed to unlock the primary');
-
- print('Starting fsync on master to flush all pending writes');
- assert.commandWorked(master.adminCommand({fsync: 1}));
- print('fsync on master completed');
-
- const kTimeout = 60 * 1000 * 5; // 5min timeout
- const dbNames = master.getDBNames();
- print('Awaiting replication of inserts into ' + dbNames);
- for (let dbName of dbNames) {
- if (dbName === 'local')
- continue;
- assert.writeOK(
- master.getDB(dbName).await_repl.insert(
- {awaiting: 'repl'}, {writeConcern: {w: 2, wtimeout: kTimeout}}),
- 'Awaiting replication failed');
- }
- print('Finished awaiting replication');
- assert.commandWorked(master.adminCommand({fsync: 1, lock: 1}),
- 'failed to re-lock the primary');
- };
-
- this.checkReplicatedDataHashes = function() {
- ReplSetTest({nodes: 0}).checkReplicatedDataHashes.apply(this, arguments);
- };
-
- this.checkReplicaSet = function() {
- ReplSetTest({nodes: 0}).checkReplicaSet.apply(this, arguments);
- };
-
- this.dumpOplog = function() {
- print('master-slave cannot dump oplog');
- };
+ const startTime = Date.now();
+ assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+
+ let skipped = false;
+ try {
+ const conn = db.getMongo();
+
+ if (isMasterSlaveDeployment(conn)) {
+ new MasterSlaveDBHashTest(conn.host).checkReplicatedDataHashes();
+ return;
}
- TestData = testData;
-
- // Since UUIDs aren't explicitly replicated in master-slave deployments, we ignore the UUID
- // in the output of the 'listCollections' command to avoid reporting a known data
- // inconsistency issue from checkReplicatedDataHashes().
- const ignoreUUIDs = serverDesc.type === 'master-slave';
- let fixture = null;
- if (serverDesc.type === 'replset') {
- fixture = new ReplSetTest(serverDesc.uri);
- } else if (serverDesc.type === 'master-slave') {
- fixture = new MasterSlaveDBHashTest(serverDesc.uri);
- } else {
- throw 'unrecognized server type ' + serverDesc.type;
+ const topology = DiscoverTopology.findConnectedNodes(conn);
+
+ if (topology.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for cluster because we are connected to a' +
+ ' stand-alone mongod: ' + tojsononeline(topology));
+ skipped = true;
+ return;
}
- fixture.checkReplicatedDataHashes(undefined, excludedDBs, ignoreUUIDs);
- }
- let startTime = Date.now();
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+ if (topology.type === Topology.kReplicaSet) {
+ if (topology.nodes.length === 1) {
+ print('Skipping data consistency checks for cluster because we are connected to a' +
+ ' 1-node replica set: ' + tojsononeline(topology));
+ skipped = true;
+ return;
+ }
- // stores each server type (master/slave or replset) and uri.
- const serversNeedingReplDataHashCheck = [];
- const primaryInfo = db.isMaster();
- const isMongos = primaryInfo.msg === 'isdbgrid';
- const isReplSet = primaryInfo.hasOwnProperty('setName');
- const uri = db.getMongo().host;
-
- assert(primaryInfo.ismaster,
- 'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
-
- assert(isMongos || isReplSet || isMasterSlave(uri),
- 'not replset, master/slave, or sharded cluster');
-
- if (isMongos) {
- // Add shards and config server if they are replica sets.
- let res = db.adminCommand('getShardMap');
- assert.commandWorked(res);
- const csURI = res.map.config;
- res = db.adminCommand('listShards');
- assert.commandWorked(res);
- const shardURIs = res.shards.map((shard) => shard.host);
-
- checkAndAddServerDesc(csURI, serversNeedingReplDataHashCheck);
- shardURIs.forEach((shardURI) => {
- checkAndAddServerDesc(shardURI, serversNeedingReplDataHashCheck);
- });
- } else {
- checkAndAddServerDesc(uri, serversNeedingReplDataHashCheck);
- }
+ new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes();
+ return;
+ }
- const threads = [];
- const excludedDBs = jsTest.options().excludedDBsFromDBHash || [];
- serversNeedingReplDataHashCheck.forEach((serverDesc) => {
- const thread = new ScopedThread(checkReplDataHashThread, serverDesc, TestData, excludedDBs);
- threads.push({serverDesc: serverDesc, handle: thread});
- thread.start();
- });
-
- if (serversNeedingReplDataHashCheck.length === 0) {
- let skipReason = 'No multi-node replication detected in ';
- if (isMongos) {
- skipReason += 'sharded cluster';
- } else if (isReplSet) {
- skipReason += 'replica set';
- } else {
- skipReason += 'master-slave set';
+ if (topology.type !== Topology.kShardedCluster) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- print('Skipping consistency checks for cluster because ' + skipReason);
- return;
- }
+ const threads = [];
+ try {
+ if (topology.configsvr.nodes.length > 1) {
+ const thread = new ScopedThread(
+ checkReplicatedDataHashesThread, topology.configsvr.nodes, TestData);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node CSRS: ' +
+ tojsononeline(topology));
+ }
- const failedChecks = [];
- threads.forEach(thread => {
- thread.handle.join();
- if (thread.handle.hasFailed()) {
- failedChecks.push(thread.serverDesc.uri + ' (' + thread.serverDesc.type + ')');
- }
- });
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
- assert.eq(failedChecks.length,
- 0,
- 'dbhash check failed for the following hosts: ' + failedChecks.join(','));
+ if (shard.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for stand-alone shard: ' +
+ tojsononeline(topology));
+ continue;
+ }
- const totalTime = Date.now() - startTime;
- print('Finished consistency checks of cluster in ' + totalTime + ' ms.');
+ if (shard.type !== Topology.kReplicaSet) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
+
+ if (shard.nodes.length > 1) {
+ const thread =
+ new ScopedThread(checkReplicatedDataHashesThread, shard.nodes, TestData);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node replica set shard: ' +
+ tojsononeline(topology));
+ }
+ }
+ } finally {
+ // Wait for each thread to finish. Throw an error if any thread fails.
+ const returnData = threads.map(thread => {
+ thread.join();
+ return thread.returnData();
+ });
+
+ returnData.forEach(res => {
+ assert.commandWorked(res, 'data consistency checks failed');
+ });
+ }
+ } finally {
+ if (!skipped) {
+ const totalTime = Date.now() - startTime;
+ print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
+ }
+ }
})();
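The hook above fans work out with ScopedThread from parallelTester.js and joins in a finally block so that a failure partway through still waits for every thread. A condensed sketch of that lifecycle, with a hypothetical worker standing in for checkReplicatedDataHashesThread (the worker returns {ok: 0, ...} instead of throwing so the failure survives the trip back across the thread boundary):

    load('jstests/libs/parallelTester.js');  // For ScopedThread.

    // Hypothetical worker function; 'host' is a "host:port" string.
    function checkHost(host) {
        try {
            const conn = new Mongo(host);
            assert.commandWorked(conn.adminCommand({ping: 1}));
            return {ok: 1};
        } catch (e) {
            return {ok: 0, host: host, error: e.toString(), stack: e.stack};
        }
    }

    const threads = [];
    try {
        for (let host of ['localhost:20000', 'localhost:20001']) {  // hypothetical hosts
            const thread = new ScopedThread(checkHost, host);
            threads.push(thread);
            thread.start();
        }
    } finally {
        // Join every thread even if starting a later one threw.
        const returnData = threads.map(thread => {
            thread.join();
            return thread.returnData();
        });
        returnData.forEach(res => assert.commandWorked(res, 'worker thread failed'));
    }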
diff --git a/jstests/hooks/run_validate_collections.js b/jstests/hooks/run_validate_collections.js
index 0fd699b8e0e..525003148e1 100644
--- a/jstests/hooks/run_validate_collections.js
+++ b/jstests/hooks/run_validate_collections.js
@@ -3,76 +3,34 @@
'use strict';
(function() {
- assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
- load('jstests/libs/parallelTester.js');
-
- function getDirectConnections(conn) {
- // If conn does not point to a repl set, then this function returns [conn].
- const res = conn.adminCommand({isMaster: 1});
- const connections = [];
-
- if (res.hasOwnProperty('setName')) {
- for (let hostString of res.hosts) {
- connections.push(new Mongo(hostString));
- }
- if (res.hasOwnProperty('passives')) {
- for (let hostString of res.passives) {
- connections.push(new Mongo(hostString));
- }
- }
- } else {
- connections.push(conn);
- }
-
- return connections;
- }
-
- function getConfigConnStr() {
- const shardMap = db.adminCommand({getShardMap: 1});
- if (!shardMap.hasOwnProperty('map')) {
- throw new Error('Expected getShardMap() to return an object a "map" field: ' +
- tojson(shardMap));
- }
-
- const map = shardMap.map;
+ load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+ load('jstests/libs/parallelTester.js'); // For ScopedThread.
- if (!map.hasOwnProperty('config')) {
- throw new Error('Expected getShardMap().map to have a "config" field: ' + tojson(map));
- }
-
- return map.config;
- }
-
- function isMongos() {
- return db.isMaster().msg === 'isdbgrid';
- }
-
- function getServerList() {
- const serverList = [];
-
- if (isMongos()) {
- // We're connected to a sharded cluster through a mongos.
-
- // 1) Add all the config servers to the server list.
- const configConnStr = getConfigConnStr();
- const configServerReplSetConn = new Mongo(configConnStr);
- serverList.push(...getDirectConnections(configServerReplSetConn));
-
- // 2) Add shard members to the server list.
- const configDB = db.getSiblingDB('config');
- const cursor = configDB.shards.find();
-
- while (cursor.hasNext()) {
- const shard = cursor.next();
- const shardReplSetConn = new Mongo(shard.host);
- serverList.push(...getDirectConnections(shardReplSetConn));
+ assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
+ const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
+
+ const hostList = [];
+
+ if (topology.type === Topology.kStandalone) {
+ hostList.push(topology.mongod);
+ } else if (topology.type === Topology.kReplicaSet) {
+ hostList.push(...topology.nodes);
+ } else if (topology.type === Topology.kShardedCluster) {
+ hostList.push(...topology.configsvr.nodes);
+
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
+
+ if (shard.type === Topology.kStandalone) {
+ hostList.push(shard.mongod);
+ } else if (shard.type === Topology.kReplicaSet) {
+ hostList.push(...shard.nodes);
+ } else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- } else {
- // We're connected to a mongod.
- serverList.push(...getDirectConnections(db.getMongo()));
}
-
- return serverList;
+ } else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
// Run a separate thread to validate collections on each server in parallel.
@@ -103,11 +61,10 @@
// We run the scoped threads in a try/finally block in case any thread throws an exception, in
// which case we want to still join all the threads.
let threads = [];
- const serverList = getServerList();
try {
- serverList.forEach(server => {
- const thread = new ScopedThread(validateCollectionsThread, server.host, TestData);
+ hostList.forEach(host => {
+ const thread = new ScopedThread(validateCollectionsThread, host, TestData);
threads.push(thread);
thread.start();
});
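As a concrete illustration of the flattening above (all host names hypothetical), a sharded-cluster topology reduces to a flat list of every data-bearing host:

    // Hypothetical object in the shape returned by DiscoverTopology.findConnectedNodes().
    const exampleTopology = {
        type: Topology.kShardedCluster,
        configsvr: {type: Topology.kReplicaSet, nodes: ['cfg0:27019', 'cfg1:27019', 'cfg2:27019']},
        shards: {
            rs0: {type: Topology.kReplicaSet, nodes: ['d0:27018', 'd1:27018']},
            shard1: {type: Topology.kStandalone, mongod: 'd2:27018'},
        },
    };
    // The loop above would produce:
    // ['cfg0:27019', 'cfg1:27019', 'cfg2:27019', 'd0:27018', 'd1:27018', 'd2:27018']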
diff --git a/jstests/libs/discover_topology.js b/jstests/libs/discover_topology.js
new file mode 100644
index 00000000000..95b072894b0
--- /dev/null
+++ b/jstests/libs/discover_topology.js
@@ -0,0 +1,102 @@
+'use strict';
+
+// The tojson() function that is commonly used to build up assertion messages doesn't support the
+// Symbol type, so we just use unique string values instead.
+var Topology = {
+ kStandalone: 'stand-alone',
+ kReplicaSet: 'replica set',
+ kShardedCluster: 'sharded cluster',
+};
+
+var DiscoverTopology = (function() {
+ const kDefaultConnectFn = (host) => new Mongo(host);
+
+ function getDataMemberConnectionStrings(conn) {
+ const res = conn.adminCommand({isMaster: 1});
+
+ if (!res.hasOwnProperty('setName')) {
+ // 'conn' represents a connection to a stand-alone mongod.
+ return {type: Topology.kStandalone, mongod: conn.host};
+ }
+
+ // The "passives" field contains the list of unelectable (priority=0) secondaries
+ // and is omitted from the server's response when there are none.
+ res.passives = res.passives || [];
+ return {type: Topology.kReplicaSet, nodes: [...res.hosts, ...res.passives]};
+ }
+
+ function findConnectedNodesViaMongos(conn, options) {
+ function getConfigServerConnectionString() {
+ const shardMap = conn.adminCommand({getShardMap: 1});
+
+ if (!shardMap.hasOwnProperty('map')) {
+ throw new Error(
+ 'Expected "getShardMap" command to return an object with a "map" field: ' +
+ tojson(shardMap));
+ }
+
+ if (!shardMap.map.hasOwnProperty('config')) {
+ throw new Error(
+ 'Expected "getShardMap" command to return an object with a "map.config"' +
+ ' field: ' + tojson(shardMap));
+ }
+
+ return shardMap.map.config;
+ }
+
+ const connectFn =
+ options.hasOwnProperty('connectFn') ? options.connectFn : kDefaultConnectFn;
+
+ const configsvrConn = connectFn(getConfigServerConnectionString());
+ const configsvrHosts = getDataMemberConnectionStrings(configsvrConn);
+
+ const shards = assert.commandWorked(conn.adminCommand({listShards: 1})).shards;
+ const shardHosts = {};
+
+ for (let shardInfo of shards) {
+ const shardConn = connectFn(shardInfo.host);
+ shardHosts[shardInfo._id] = getDataMemberConnectionStrings(shardConn);
+ }
+
+ return {type: Topology.kShardedCluster, configsvr: configsvrHosts, shards: shardHosts};
+ }
+
+ return {
+ /**
+ * Returns an object describing the topology of the mongod processes reachable from 'conn'.
+ * The "connectFn" property can be optionally specified to support custom retry logic when
+ * making connection attempts without overriding the Mongo constructor itself.
+ *
+ * For a stand-alone mongod, an object of the form
+ * {type: Topology.kStandalone, mongod: <conn-string>}
+ * is returned.
+ *
+ * For a replica set, an object of the form
+ * {type: Topology.kReplicaSet, nodes: [<conn-string1>, <conn-string2>, ...]}
+ * is returned.
+ *
+ * For a sharded cluster, an object of the form
+ * {
+ * type: Topology.kShardedCluster,
+ * configsvr: {nodes: [...]},
+ * shards: {
+ * <shard-name1>: {type: Topology.kStandalone, mongod: ...},
+ * <shard-name2>: {type: Topology.kReplicaSet, nodes: [...]},
+ * ...
+ * }
+ * }
+ * is returned, where the description for each shard depends on whether it is a stand-alone
+ * shard or a replica set shard.
+ */
+ findConnectedNodes: function findConnectedNodes(conn,
+ options = {connectFn: kDefaultConnectFn}) {
+ const isMongod = conn.adminCommand({isMaster: 1}).msg !== 'isdbgrid';
+
+ if (isMongod) {
+ return getDataMemberConnectionStrings(conn);
+ }
+
+ return findConnectedNodesViaMongos(conn, options);
+ },
+ };
+})();
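The 'connectFn' option is the seam for the custom retry logic mentioned in the doc comment above. A minimal sketch (the retry count and delay are arbitrary choices, not part of this commit):

    // Hypothetical retrying connect function; sleep(ms) is the mongo shell built-in.
    const retryingConnectFn = function(host) {
        let lastError;
        for (let attempt = 1; attempt <= 3; ++attempt) {
            try {
                return new Mongo(host);
            } catch (e) {
                lastError = e;
                print('Attempt ' + attempt + ' to connect to ' + host + ' failed: ' + e);
                sleep(1000);
            }
        }
        throw lastError;
    };

    const topology =
        DiscoverTopology.findConnectedNodes(db.getMongo(), {connectFn: retryingConnectFn});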
diff --git a/jstests/noPassthrough/data_consistency_checks.js b/jstests/noPassthrough/data_consistency_checks.js
new file mode 100644
index 00000000000..7a1cfacdfdf
--- /dev/null
+++ b/jstests/noPassthrough/data_consistency_checks.js
@@ -0,0 +1,187 @@
+/**
+ * Verifies that the data consistency checks work against the variety of cluster types we use in our
+ * testing.
+ *
+ * @tags: [requires_replication, requires_sharding]
+ */
+
+// The global 'db' variable is used by the data consistency hooks.
+var db;
+
+(function() {
+ "use strict";
+
+ // We skip doing the data consistency checks while terminating the cluster because they conflict
+ // with the counts of the number of times the "dbhash" and "validate" commands are run.
+ TestData.skipCollectionAndIndexValidation = true;
+ TestData.skipCheckDBHashes = true;
+
+ function makePatternForDBHash(dbName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
+ "g");
+ }
+
+ function makePatternForValidate(dbName, collName) {
+ return new RegExp(
+ "COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
+ "\"",
+ "g");
+ }
+
+ function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
+ }
+ return numMatches;
+ }
+
+ function runDataConsistencyChecks(testCase) {
+ db = testCase.conn.getDB("test");
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_check_repl_dbhash.js");
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
+ }
+ }
+
+ (function testReplicaSetWithVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+ rst.startSet();
+ rst.initiateWithNodeZeroAsPrimary();
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.writeOK(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output =
+ runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
+
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+ })();
+
+ (function testReplicaSetWithNonVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+ rst.startSet();
+
+ const replSetConfig = rst.getReplSetConfig();
+ for (let i = 1; i < numNodes; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
+ }
+ rst.initiate(replSetConfig);
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.writeOK(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output =
+ runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
+
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+ })();
+
+ (function testShardedClusterWithOneNodeCSRS() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 0
+ });
+
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ let pattern = makePatternForDBHash("config");
+ assert.eq(0,
+ countMatches(pattern, output),
+ "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+
+ pattern = makePatternForValidate("config", "mongos");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+ })();
+
+ (function testShardedCluster() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 3,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1,
+ rs: {nodes: 2},
+ rsOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
+ // the replica set shard.
+ assert.writeOK(st.s.getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ // The "admin" database exists on both the CSRS and the replica set shards due to the
+ // "admin.system.version" collection.
+ let pattern = makePatternForDBHash("admin");
+ assert.eq(5,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each CSRS node and each replica set shard node in the log output");
+
+ pattern = makePatternForValidate("config", "mongos");
+ assert.eq(3,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
+
+ pattern = makePatternForDBHash("test");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+ })();
+})();
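To make the counting mechanism concrete: the snippet below shows how makePatternForDBHash() and countMatches() cooperate. It assumes the two helpers from the test above are in scope, and the "log" is a synthetic paraphrase of mongod's command logging, not verbatim server output:

    const pattern = makePatternForDBHash("test");
    const syntheticOutput =
        'd20000| 2018-03-25T01:22:47 I COMMAND [conn5] command test.$cmd' +
        ' appName: "MongoDB Shell" command: dbHash { dbHash: 1.0 } ...\n' +
        'd20001| 2018-03-25T01:22:47 I COMMAND [conn3] command test.$cmd' +
        ' appName: "MongoDB Shell" command: dbhash { dbhash: 1.0 } ...\n';
    // db[Hh]ash matches both capitalizations, one per line.
    assert.eq(2, countMatches(pattern, syntheticOutput));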