author     clang-format-7.0.1 <adam.martin@10gen.com>        2019-07-26 18:42:24 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
commit     c1a45ebbb0530e3d0201321d725527f1eb83ffce (patch)
tree       f523079dc5ded3052eefbdcaae424b7502df5b25 /jstests/hooks
parent     c9599d8610c3da0b7c3da65667aff821063cf5b9 (diff)
download   mongo-c1a45ebbb0530e3d0201321d725527f1eb83ffce.tar.gz
Apply formatting per `clang-format-7.0.1`
Diffstat (limited to 'jstests/hooks')
-rw-r--r--  jstests/hooks/drop_sharded_collections.js          42
-rw-r--r--  jstests/hooks/run_check_repl_dbhash.js             155
-rw-r--r--  jstests/hooks/run_check_repl_dbhash_background.js  810
-rw-r--r--  jstests/hooks/run_check_repl_oplogs.js             64
-rw-r--r--  jstests/hooks/run_initial_sync_node_validation.js  73
-rw-r--r--  jstests/hooks/run_validate_collections.js          67
-rw-r--r--  jstests/hooks/validate_collections.js              4
7 files changed, 603 insertions(+), 612 deletions(-)
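
The commit message above says the files were reformatted with `clang-format-7.0.1`. As a rough illustration only, a reformat of this directory could be driven by a small script like the one below; this is a minimal sketch, assuming `clang-format` 7.0.1 is on PATH and that the repository's `.clang-format` configuration is picked up via `--style=file`. It is not the project's own formatting tooling, which is not shown on this page.

```python
#!/usr/bin/env python3
"""Minimal sketch: re-run clang-format over the .js files under jstests/hooks.

Assumes clang-format 7.0.1 is on PATH and that the nearest .clang-format file
is discovered via --style=file. Illustration only, not MongoDB's own tooling.
"""
import subprocess
from pathlib import Path


def format_js_files(root="jstests/hooks"):
    for path in sorted(Path(root).rglob("*.js")):
        # -i rewrites the file in place using the repository's style settings.
        subprocess.run(["clang-format", "-i", "--style=file", str(path)], check=True)


if __name__ == "__main__":
    format_js_files()
```

The unified diff that follows is the verbatim result of that reformatting pass and is left unmodified.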
diff --git a/jstests/hooks/drop_sharded_collections.js b/jstests/hooks/drop_sharded_collections.js
index 5758e3027e5..dc9bc12a5d3 100644
--- a/jstests/hooks/drop_sharded_collections.js
+++ b/jstests/hooks/drop_sharded_collections.js
@@ -3,31 +3,31 @@
* like config.system.sessions).
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For isMongos.
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
- assert(FixtureHelpers.isMongos(db), "not connected to mongos");
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+assert(FixtureHelpers.isMongos(db), "not connected to mongos");
- let balSettingResult = assert.commandWorked(db.adminCommand({balancerStatus: 1}));
- if (balSettingResult.mode !== 'off') {
- assert.commandWorked(db.adminCommand({balancerStop: 1}));
- }
-
- db.getSiblingDB('config').collections.find().forEach(collEntry => {
- if (collEntry._id !== 'config.system.sessions') {
- let nsSplit = collEntry._id.split('.');
- const dbName = nsSplit.shift();
- const collName = nsSplit.join('.');
+let balSettingResult = assert.commandWorked(db.adminCommand({balancerStatus: 1}));
+if (balSettingResult.mode !== 'off') {
+ assert.commandWorked(db.adminCommand({balancerStop: 1}));
+}
- // Note: drop also cleans up tags and chunks associated with ns.
- assert.commandWorked(db.getSiblingDB(dbName).runCommand({drop: collName}));
- }
- });
+db.getSiblingDB('config').collections.find().forEach(collEntry => {
+ if (collEntry._id !== 'config.system.sessions') {
+ let nsSplit = collEntry._id.split('.');
+ const dbName = nsSplit.shift();
+ const collName = nsSplit.join('.');
- // Turn balancer back on if it was not off earlier.
- if (balSettingResult.mode !== 'off') {
- assert.commandWorked(db.adminCommand({balancerStart: 1}));
+ // Note: drop also cleans up tags and chunks associated with ns.
+ assert.commandWorked(db.getSiblingDB(dbName).runCommand({drop: collName}));
}
+});
+
+// Turn balancer back on if it was not off earlier.
+if (balSettingResult.mode !== 'off') {
+ assert.commandWorked(db.adminCommand({balancerStart: 1}));
+}
})();
diff --git a/jstests/hooks/run_check_repl_dbhash.js b/jstests/hooks/run_check_repl_dbhash.js
index bae3943964e..9067d4359ad 100644
--- a/jstests/hooks/run_check_repl_dbhash.js
+++ b/jstests/hooks/run_check_repl_dbhash.js
@@ -3,106 +3,105 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/libs/parallelTester.js'); // For ScopedThread.
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/libs/parallelTester.js'); // For ScopedThread.
- function checkReplicatedDataHashesThread(hosts) {
- load('jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js');
+function checkReplicatedDataHashesThread(hosts) {
+ load('jstests/libs/override_methods/implicitly_retry_on_background_op_in_progress.js');
- try {
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- const rst = new ReplSetTest(hosts[0]);
- rst.checkReplicatedDataHashes(undefined, excludedDBs);
- if (TestData.checkCollectionCounts) {
- rst.checkCollectionCounts();
- }
- return {ok: 1};
- } catch (e) {
- return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
+ try {
+ const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+ const rst = new ReplSetTest(hosts[0]);
+ rst.checkReplicatedDataHashes(undefined, excludedDBs);
+ if (TestData.checkCollectionCounts) {
+ rst.checkCollectionCounts();
}
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
}
+}
- const startTime = Date.now();
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+const startTime = Date.now();
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
- let skipped = false;
- try {
- const conn = db.getMongo();
- const topology = DiscoverTopology.findConnectedNodes(conn);
+let skipped = false;
+try {
+ const conn = db.getMongo();
+ const topology = DiscoverTopology.findConnectedNodes(conn);
- if (topology.type === Topology.kStandalone) {
+ if (topology.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for cluster because we are connected to a' +
+ ' stand-alone mongod: ' + tojsononeline(topology));
+ skipped = true;
+ return;
+ }
+
+ if (topology.type === Topology.kReplicaSet) {
+ if (topology.nodes.length === 1) {
print('Skipping data consistency checks for cluster because we are connected to a' +
- ' stand-alone mongod: ' + tojsononeline(topology));
+ ' 1-node replica set: ' + tojsononeline(topology));
skipped = true;
return;
}
- if (topology.type === Topology.kReplicaSet) {
- if (topology.nodes.length === 1) {
- print('Skipping data consistency checks for cluster because we are connected to a' +
- ' 1-node replica set: ' + tojsononeline(topology));
- skipped = true;
- return;
- }
+ const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+ new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes(undefined, excludedDBs);
+ return;
+ }
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes(undefined, excludedDBs);
- return;
- }
+ if (topology.type !== Topology.kShardedCluster) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
- if (topology.type !== Topology.kShardedCluster) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
+ const threads = [];
+ try {
+ if (topology.configsvr.nodes.length > 1) {
+ const thread =
+ new ScopedThread(checkReplicatedDataHashesThread, topology.configsvr.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node CSRS: ' + tojsononeline(topology));
}
- const threads = [];
- try {
- if (topology.configsvr.nodes.length > 1) {
- const thread =
- new ScopedThread(checkReplicatedDataHashesThread, topology.configsvr.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node CSRS: ' +
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
+
+ if (shard.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for stand-alone shard: ' +
tojsononeline(topology));
+ continue;
}
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
-
- if (shard.type === Topology.kStandalone) {
- print('Skipping data consistency checks for stand-alone shard: ' +
- tojsononeline(topology));
- continue;
- }
-
- if (shard.type !== Topology.kReplicaSet) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
-
- if (shard.nodes.length > 1) {
- const thread = new ScopedThread(checkReplicatedDataHashesThread, shard.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node replica set shard: ' +
- tojsononeline(topology));
- }
+ if (shard.type !== Topology.kReplicaSet) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- } finally {
- // Wait for each thread to finish. Throw an error if any thread fails.
- const returnData = threads.map(thread => {
- thread.join();
- return thread.returnData();
- });
- returnData.forEach(res => {
- assert.commandWorked(res, 'data consistency checks failed');
- });
+ if (shard.nodes.length > 1) {
+ const thread = new ScopedThread(checkReplicatedDataHashesThread, shard.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node replica set shard: ' +
+ tojsononeline(topology));
+ }
}
} finally {
- if (!skipped) {
- const totalTime = Date.now() - startTime;
- print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
- }
+ // Wait for each thread to finish. Throw an error if any thread fails.
+ const returnData = threads.map(thread => {
+ thread.join();
+ return thread.returnData();
+ });
+
+ returnData.forEach(res => {
+ assert.commandWorked(res, 'data consistency checks failed');
+ });
+ }
+} finally {
+ if (!skipped) {
+ const totalTime = Date.now() - startTime;
+ print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
}
+}
})();
diff --git a/jstests/hooks/run_check_repl_dbhash_background.js b/jstests/hooks/run_check_repl_dbhash_background.js
index 5d7e2698780..d20c5f78c21 100644
--- a/jstests/hooks/run_check_repl_dbhash_background.js
+++ b/jstests/hooks/run_check_repl_dbhash_background.js
@@ -18,478 +18,472 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/libs/parallelTester.js'); // For ScopedThread.
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/libs/parallelTester.js'); // For ScopedThread.
- if (typeof db === 'undefined') {
- throw new Error(
- "Expected mongo shell to be connected a server, but global 'db' object isn't defined");
- }
+if (typeof db === 'undefined') {
+ throw new Error(
+ "Expected mongo shell to be connected a server, but global 'db' object isn't defined");
+}
- // We turn off printing the JavaScript stacktrace in doassert() to avoid generating an
- // overwhelming amount of log messages when handling transient errors.
- TestData = TestData || {};
- TestData.traceExceptions = false;
+// We turn off printing the JavaScript stacktrace in doassert() to avoid generating an
+// overwhelming amount of log messages when handling transient errors.
+TestData = TestData || {};
+TestData.traceExceptions = false;
- const conn = db.getMongo();
- const topology = DiscoverTopology.findConnectedNodes(conn);
+const conn = db.getMongo();
+const topology = DiscoverTopology.findConnectedNodes(conn);
- function checkReplDbhashBackgroundThread(hosts) {
- let debugInfo = [];
+function checkReplDbhashBackgroundThread(hosts) {
+ let debugInfo = [];
- // Calls 'func' with the print() function overridden to be a no-op.
- const quietly = (func) => {
- const printOriginal = print;
- try {
- print = Function.prototype;
- func();
- } finally {
- print = printOriginal;
- }
- };
+ // Calls 'func' with the print() function overridden to be a no-op.
+ const quietly = (func) => {
+ const printOriginal = print;
+ try {
+ print = Function.prototype;
+ func();
+ } finally {
+ print = printOriginal;
+ }
+ };
+
+ let rst;
+ // We construct the ReplSetTest instance with the print() function overridden to be a no-op
+ // in order to suppress the log messages about the replica set configuration. The
+ // run_check_repl_dbhash_background.js hook is executed frequently by resmoke.py and would
+ // otherwise lead to generating an overwhelming amount of log messages.
+ quietly(() => {
+ rst = new ReplSetTest(hosts[0]);
+ });
+
+ if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ print("Skipping data consistency checks for replica set: " + rst.getURL() +
+ " because storage engine does not support snapshot reads.");
+ return {ok: 1};
+ }
+ print("Running data consistency checks for replica set: " + rst.getURL());
+
+ const sessions = [
+ rst.getPrimary(),
+ ...rst.getSecondaries().filter(conn => {
+ return !conn.adminCommand({isMaster: 1}).arbiterOnly;
+ })
+ ].map(conn => conn.startSession({causalConsistency: false}));
+
+ const resetFns = [];
+ const kForeverSeconds = 1e9;
+ const dbNames = new Set();
+
+ // We enable the "WTPreserveSnapshotHistoryIndefinitely" failpoint to ensure that the same
+ // snapshot will be available to read at on the primary and secondaries.
+ for (let session of sessions) {
+ const db = session.getDatabase('admin');
+
+ let preserveRes = assert.commandWorked(db.runCommand({
+ configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
+ mode: 'alwaysOn',
+ }),
+ debugInfo);
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "preserveFailPointOpTime": preserveRes['operationTime']
+ });
- let rst;
- // We construct the ReplSetTest instance with the print() function overridden to be a no-op
- // in order to suppress the log messages about the replica set configuration. The
- // run_check_repl_dbhash_background.js hook is executed frequently by resmoke.py and would
- // otherwise lead to generating an overwhelming amount of log messages.
- quietly(() => {
- rst = new ReplSetTest(hosts[0]);
+ resetFns.push(() => {
+ assert.commandWorked(db.runCommand({
+ configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
+ mode: 'off',
+ }));
});
+ }
- if (!rst.getPrimary()
- .adminCommand("serverStatus")
- .storageEngine.supportsSnapshotReadConcern) {
- print("Skipping data consistency checks for replica set: " + rst.getURL() +
- " because storage engine does not support snapshot reads.");
- return {ok: 1};
+ for (let session of sessions) {
+ const db = session.getDatabase('admin');
+ const res = assert.commandWorked(db.runCommand({listDatabases: 1, nameOnly: true}));
+ for (let dbInfo of res.databases) {
+ dbNames.add(dbInfo.name);
}
- print("Running data consistency checks for replica set: " + rst.getURL());
-
- const sessions = [
- rst.getPrimary(),
- ...rst.getSecondaries().filter(conn => {
- return !conn.adminCommand({isMaster: 1}).arbiterOnly;
- })
- ].map(conn => conn.startSession({causalConsistency: false}));
-
- const resetFns = [];
- const kForeverSeconds = 1e9;
- const dbNames = new Set();
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "listDatabaseOpTime": res['operationTime']
+ });
+ }
- // We enable the "WTPreserveSnapshotHistoryIndefinitely" failpoint to ensure that the same
- // snapshot will be available to read at on the primary and secondaries.
- for (let session of sessions) {
+ // Transactions cannot be run on the following databases so we don't attempt to read at a
+ // clusterTime on them either. (The "local" database is also not replicated.)
+ dbNames.delete('admin');
+ dbNames.delete('config');
+ dbNames.delete('local');
+
+ const results = [];
+
+ // The waitForSecondaries() function waits for all secondaries to have applied up to
+ // 'clusterTime' locally. This ensures that a later $_internalReadAtClusterTime read doesn't
+ // fail as a result of the secondary's clusterTime being behind 'clusterTime'.
+ const waitForSecondaries = (clusterTime, signedClusterTime) => {
+ debugInfo.push({"waitForSecondaries": clusterTime, "signedClusterTime": signedClusterTime});
+ for (let i = 1; i < sessions.length; ++i) {
+ const session = sessions[i];
const db = session.getDatabase('admin');
- let preserveRes = assert.commandWorked(db.runCommand({
- configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
- mode: 'alwaysOn',
- }),
- debugInfo);
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "preserveFailPointOpTime": preserveRes['operationTime']
- });
-
- resetFns.push(() => {
- assert.commandWorked(db.runCommand({
- configureFailPoint: 'WTPreserveSnapshotHistoryIndefinitely',
- mode: 'off',
- }));
- });
- }
+ // We advance the clusterTime on the secondary's session to ensure that
+ // 'clusterTime' doesn't exceed the node's notion of the latest clusterTime.
+ session.advanceClusterTime(signedClusterTime);
+
+ // We need to make sure the secondary has applied up to 'clusterTime' and advanced
+ // its majority commit point.
+
+ if (jsTest.options().enableMajorityReadConcern !== false) {
+ // If majority reads are supported, we can issue an afterClusterTime read on
+ // a nonexistent collection and wait on it. This has the advantage of being
+ // easier to debug in case of a timeout.
+ let res = assert.commandWorked(db.runCommand({
+ find: 'run_check_repl_dbhash_background',
+ readConcern: {level: 'majority', afterClusterTime: clusterTime},
+ limit: 1,
+ singleBatch: true,
+ }),
+ debugInfo);
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "majorityReadOpTime": res['operationTime']
+ });
+ } else {
+ // If majority reads are not supported, then our only option is to poll for the
+ // appliedOpTime on the secondary to catch up.
+ assert.soon(
+ function() {
+ const rsStatus =
+ assert.commandWorked(db.adminCommand({replSetGetStatus: 1}));
+
+ // The 'atClusterTime' waits for the appliedOpTime to advance to
+ // 'clusterTime'.
+ const appliedOpTime = rsStatus.optimes.appliedOpTime;
+ if (bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0) {
+ debugInfo.push({
+ "node": db.getMongo(),
+ "session": session,
+ "appliedOpTime": appliedOpTime.ts
+ });
+ }
- for (let session of sessions) {
- const db = session.getDatabase('admin');
- const res = assert.commandWorked(db.runCommand({listDatabases: 1, nameOnly: true}));
- for (let dbInfo of res.databases) {
- dbNames.add(dbInfo.name);
+ return bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0;
+ },
+ "The majority commit point on secondary " + i + " failed to reach " +
+ clusterTime,
+ 10 * 60 * 1000);
}
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "listDatabaseOpTime": res['operationTime']
- });
}
-
- // Transactions cannot be run on the following databases so we don't attempt to read at a
- // clusterTime on them either. (The "local" database is also not replicated.)
- dbNames.delete('admin');
- dbNames.delete('config');
- dbNames.delete('local');
-
- const results = [];
-
- // The waitForSecondaries() function waits for all secondaries to have applied up to
- // 'clusterTime' locally. This ensures that a later $_internalReadAtClusterTime read doesn't
- // fail as a result of the secondary's clusterTime being behind 'clusterTime'.
- const waitForSecondaries = (clusterTime, signedClusterTime) => {
- debugInfo.push(
- {"waitForSecondaries": clusterTime, "signedClusterTime": signedClusterTime});
- for (let i = 1; i < sessions.length; ++i) {
- const session = sessions[i];
- const db = session.getDatabase('admin');
-
- // We advance the clusterTime on the secondary's session to ensure that
- // 'clusterTime' doesn't exceed the node's notion of the latest clusterTime.
- session.advanceClusterTime(signedClusterTime);
-
- // We need to make sure the secondary has applied up to 'clusterTime' and advanced
- // its majority commit point.
-
- if (jsTest.options().enableMajorityReadConcern !== false) {
- // If majority reads are supported, we can issue an afterClusterTime read on
- // a nonexistent collection and wait on it. This has the advantage of being
- // easier to debug in case of a timeout.
- let res = assert.commandWorked(db.runCommand({
- find: 'run_check_repl_dbhash_background',
- readConcern: {level: 'majority', afterClusterTime: clusterTime},
- limit: 1,
- singleBatch: true,
- }),
- debugInfo);
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "majorityReadOpTime": res['operationTime']
- });
- } else {
- // If majority reads are not supported, then our only option is to poll for the
- // appliedOpTime on the secondary to catch up.
- assert.soon(
- function() {
- const rsStatus =
- assert.commandWorked(db.adminCommand({replSetGetStatus: 1}));
-
- // The 'atClusterTime' waits for the appliedOpTime to advance to
- // 'clusterTime'.
- const appliedOpTime = rsStatus.optimes.appliedOpTime;
- if (bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0) {
- debugInfo.push({
- "node": db.getMongo(),
- "session": session,
- "appliedOpTime": appliedOpTime.ts
- });
- }
-
- return bsonWoCompare(appliedOpTime.ts, clusterTime) >= 0;
- },
- "The majority commit point on secondary " + i + " failed to reach " +
- clusterTime,
- 10 * 60 * 1000);
+ };
+
+ // The checkCollectionHashesForDB() function identifies a collection by its UUID and ignores
+ // the case where a collection isn't present on a node to work around how the collection
+ // catalog isn't multi-versioned. Unlike with ReplSetTest#checkReplicatedDataHashes(), it is
+ // possible for a collection catalog operation (e.g. a drop or rename) to have been applied
+ // on the primary but not yet applied on the secondary.
+ const checkCollectionHashesForDB = (dbName, clusterTime) => {
+ const result = [];
+ const hashes =
+ rst.getHashesUsingSessions(sessions, dbName, {readAtClusterTime: clusterTime});
+ const hashesByUUID = hashes.map((response, i) => {
+ const info = {};
+
+ for (let collName of Object.keys(response.collections)) {
+ const hash = response.collections[collName];
+ const uuid = response.uuids[collName];
+ if (uuid !== undefined) {
+ info[uuid.toString()] = {
+ host: sessions[i].getClient().host,
+ hash,
+ collName,
+ uuid,
+ };
}
}
- };
- // The checkCollectionHashesForDB() function identifies a collection by its UUID and ignores
- // the case where a collection isn't present on a node to work around how the collection
- // catalog isn't multi-versioned. Unlike with ReplSetTest#checkReplicatedDataHashes(), it is
- // possible for a collection catalog operation (e.g. a drop or rename) to have been applied
- // on the primary but not yet applied on the secondary.
- const checkCollectionHashesForDB = (dbName, clusterTime) => {
- const result = [];
- const hashes =
- rst.getHashesUsingSessions(sessions, dbName, {readAtClusterTime: clusterTime});
- const hashesByUUID = hashes.map((response, i) => {
- const info = {};
-
- for (let collName of Object.keys(response.collections)) {
- const hash = response.collections[collName];
- const uuid = response.uuids[collName];
- if (uuid !== undefined) {
- info[uuid.toString()] = {
- host: sessions[i].getClient().host,
- hash,
- collName,
- uuid,
- };
- }
- }
+ return Object.assign({}, response, {hashesByUUID: info});
+ });
- return Object.assign({}, response, {hashesByUUID: info});
- });
-
- const primarySession = sessions[0];
- for (let i = 1; i < hashes.length; ++i) {
- const uuids = new Set([
- ...Object.keys(hashesByUUID[0].hashesByUUID),
- ...Object.keys(hashesByUUID[i].hashesByUUID),
- ]);
-
- const secondarySession = sessions[i];
- for (let uuid of uuids) {
- const primaryInfo = hashesByUUID[0].hashesByUUID[uuid];
- const secondaryInfo = hashesByUUID[i].hashesByUUID[uuid];
-
- if (primaryInfo === undefined) {
- print("Skipping collection because it doesn't exist on the primary: " +
- tojsononeline(secondaryInfo));
- continue;
- }
+ const primarySession = sessions[0];
+ for (let i = 1; i < hashes.length; ++i) {
+ const uuids = new Set([
+ ...Object.keys(hashesByUUID[0].hashesByUUID),
+ ...Object.keys(hashesByUUID[i].hashesByUUID),
+ ]);
+
+ const secondarySession = sessions[i];
+ for (let uuid of uuids) {
+ const primaryInfo = hashesByUUID[0].hashesByUUID[uuid];
+ const secondaryInfo = hashesByUUID[i].hashesByUUID[uuid];
+
+ if (primaryInfo === undefined) {
+ print("Skipping collection because it doesn't exist on the primary: " +
+ tojsononeline(secondaryInfo));
+ continue;
+ }
- if (secondaryInfo === undefined) {
- print("Skipping collection because it doesn't exist on the secondary: " +
- tojsononeline(primaryInfo));
- continue;
- }
+ if (secondaryInfo === undefined) {
+ print("Skipping collection because it doesn't exist on the secondary: " +
+ tojsononeline(primaryInfo));
+ continue;
+ }
- if (primaryInfo.hash !== secondaryInfo.hash) {
- print("DBHash mismatch found for collection with uuid: " + uuid +
- ". Primary info: " + tojsononeline(primaryInfo) +
- ". Secondary info: " + tojsononeline(secondaryInfo));
- const diff = rst.getCollectionDiffUsingSessions(
- primarySession, secondarySession, dbName, primaryInfo.uuid);
-
- result.push({
- primary: primaryInfo,
- secondary: secondaryInfo,
- dbName: dbName,
- diff: diff,
- });
- }
+ if (primaryInfo.hash !== secondaryInfo.hash) {
+ print("DBHash mismatch found for collection with uuid: " + uuid +
+ ". Primary info: " + tojsononeline(primaryInfo) +
+ ". Secondary info: " + tojsononeline(secondaryInfo));
+ const diff = rst.getCollectionDiffUsingSessions(
+ primarySession, secondarySession, dbName, primaryInfo.uuid);
+
+ result.push({
+ primary: primaryInfo,
+ secondary: secondaryInfo,
+ dbName: dbName,
+ diff: diff,
+ });
}
}
+ }
- return result;
- };
+ return result;
+ };
+
+ for (let dbName of dbNames) {
+ let result;
+ let clusterTime;
+ let previousClusterTime;
+ let hasTransientError;
+ let performNoopWrite;
+
+ // The isTransientError() function is responsible for setting hasTransientError to true.
+ const isTransientError = (e) => {
+ // It is possible for the ReplSetTest#getHashesUsingSessions() function to be
+ // interrupted due to active sessions being killed by a test running concurrently.
+ // We treat this as a transient error and simply retry running the dbHash check.
+ //
+ // Note that unlike auto_retry_transaction.js, we do not treat CursorKilled or
+ // CursorNotFound error responses as transient errors because the
+ // run_check_repl_dbhash_background.js hook would only establish a cursor via
+ // ReplSetTest#getCollectionDiffUsingSessions() upon detecting a dbHash mismatch. It
+ // is presumed to still useful to know that a bug exists even if we cannot get more
+ // diagnostics for it.
+ if (e.code === ErrorCodes.Interrupted) {
+ hasTransientError = true;
+ }
- for (let dbName of dbNames) {
- let result;
- let clusterTime;
- let previousClusterTime;
- let hasTransientError;
- let performNoopWrite;
-
- // The isTransientError() function is responsible for setting hasTransientError to true.
- const isTransientError = (e) => {
- // It is possible for the ReplSetTest#getHashesUsingSessions() function to be
- // interrupted due to active sessions being killed by a test running concurrently.
- // We treat this as a transient error and simply retry running the dbHash check.
- //
- // Note that unlike auto_retry_transaction.js, we do not treat CursorKilled or
- // CursorNotFound error responses as transient errors because the
- // run_check_repl_dbhash_background.js hook would only establish a cursor via
- // ReplSetTest#getCollectionDiffUsingSessions() upon detecting a dbHash mismatch. It
- // is presumed to still useful to know that a bug exists even if we cannot get more
- // diagnostics for it.
- if (e.code === ErrorCodes.Interrupted) {
- hasTransientError = true;
+ // Perform a no-op write to the primary if the clusterTime between each call remain
+ // the same and if we encounter the SnapshotUnavailable error as the secondaries
+ // minimum timestamp can be greater than the primaries minimum timestamp.
+ if (e.code === ErrorCodes.SnapshotUnavailable) {
+ if (bsonBinaryEqual(clusterTime, previousClusterTime)) {
+ performNoopWrite = true;
}
+ hasTransientError = true;
+ }
- // Perform a no-op write to the primary if the clusterTime between each call remain
- // the same and if we encounter the SnapshotUnavailable error as the secondaries
- // minimum timestamp can be greater than the primaries minimum timestamp.
- if (e.code === ErrorCodes.SnapshotUnavailable) {
- if (bsonBinaryEqual(clusterTime, previousClusterTime)) {
- performNoopWrite = true;
- }
- hasTransientError = true;
- }
+ // InvalidOptions can be returned when $_internalReadAtClusterTime is greater than
+ // the all-committed timestamp. As the dbHash command is running in the background
+ // at varying times, it's possible that we may run dbHash while a prepared
+ // transactions has yet to commit or abort.
+ if (e.code === ErrorCodes.InvalidOptions) {
+ hasTransientError = true;
+ }
- // InvalidOptions can be returned when $_internalReadAtClusterTime is greater than
- // the all-committed timestamp. As the dbHash command is running in the background
- // at varying times, it's possible that we may run dbHash while a prepared
- // transactions has yet to commit or abort.
- if (e.code === ErrorCodes.InvalidOptions) {
- hasTransientError = true;
- }
+ return hasTransientError;
+ };
- return hasTransientError;
- };
-
- do {
- // SERVER-38928: Due to races around advancing last applied, there's technically no
- // guarantee that a primary will report a later operation time than its
- // secondaries. Perform the snapshot read at the latest reported operation time.
- previousClusterTime = clusterTime;
- clusterTime = sessions[0].getOperationTime();
- let signedClusterTime = sessions[0].getClusterTime();
- for (let sess of sessions.slice(1)) {
- let ts = sess.getOperationTime();
- if (timestampCmp(ts, clusterTime) > 0) {
- clusterTime = ts;
- signedClusterTime = sess.getClusterTime();
- }
+ do {
+ // SERVER-38928: Due to races around advancing last applied, there's technically no
+ // guarantee that a primary will report a later operation time than its
+ // secondaries. Perform the snapshot read at the latest reported operation time.
+ previousClusterTime = clusterTime;
+ clusterTime = sessions[0].getOperationTime();
+ let signedClusterTime = sessions[0].getClusterTime();
+ for (let sess of sessions.slice(1)) {
+ let ts = sess.getOperationTime();
+ if (timestampCmp(ts, clusterTime) > 0) {
+ clusterTime = ts;
+ signedClusterTime = sess.getClusterTime();
}
- waitForSecondaries(clusterTime, signedClusterTime);
+ }
+ waitForSecondaries(clusterTime, signedClusterTime);
- for (let session of sessions) {
- debugInfo.push({
- "node": session.getClient(),
- "session": session,
- "readAtClusterTime": clusterTime
- });
- }
+ for (let session of sessions) {
+ debugInfo.push({
+ "node": session.getClient(),
+ "session": session,
+ "readAtClusterTime": clusterTime
+ });
+ }
- hasTransientError = false;
- performNoopWrite = false;
-
- try {
- result = checkCollectionHashesForDB(dbName, clusterTime);
- } catch (e) {
- if (isTransientError(e)) {
- if (performNoopWrite) {
- const primarySession = sessions[0];
-
- // If the no-op write fails due to the global lock not being able to be
- // acquired within 1 millisecond, retry the operation again at a later
- // time.
- assert.commandWorkedOrFailedWithCode(
- primarySession.getDatabase(dbName).adminCommand(
- {appendOplogNote: 1, data: {}}),
- ErrorCodes.LockFailed);
- }
+ hasTransientError = false;
+ performNoopWrite = false;
- debugInfo.push({"transientError": e, "performNoopWrite": performNoopWrite});
- continue;
+ try {
+ result = checkCollectionHashesForDB(dbName, clusterTime);
+ } catch (e) {
+ if (isTransientError(e)) {
+ if (performNoopWrite) {
+ const primarySession = sessions[0];
+
+ // If the no-op write fails due to the global lock not being able to be
+ // acquired within 1 millisecond, retry the operation again at a later
+ // time.
+ assert.commandWorkedOrFailedWithCode(
+ primarySession.getDatabase(dbName).adminCommand(
+ {appendOplogNote: 1, data: {}}),
+ ErrorCodes.LockFailed);
}
- jsTestLog(debugInfo);
- throw e;
+ debugInfo.push({"transientError": e, "performNoopWrite": performNoopWrite});
+ continue;
}
- } while (hasTransientError);
- for (let mismatchInfo of result) {
- mismatchInfo.atClusterTime = clusterTime;
- results.push(mismatchInfo);
+ jsTestLog(debugInfo);
+ throw e;
}
- }
+ } while (hasTransientError);
- for (let resetFn of resetFns) {
- resetFn();
+ for (let mismatchInfo of result) {
+ mismatchInfo.atClusterTime = clusterTime;
+ results.push(mismatchInfo);
}
+ }
- const headings = [];
- let errorBlob = '';
+ for (let resetFn of resetFns) {
+ resetFn();
+ }
- for (let mismatchInfo of results) {
- const diff = mismatchInfo.diff;
- delete mismatchInfo.diff;
+ const headings = [];
+ let errorBlob = '';
- const heading =
- `dbhash mismatch for ${mismatchInfo.dbName}.${mismatchInfo.primary.collName}`;
+ for (let mismatchInfo of results) {
+ const diff = mismatchInfo.diff;
+ delete mismatchInfo.diff;
- headings.push(heading);
+ const heading =
+ `dbhash mismatch for ${mismatchInfo.dbName}.${mismatchInfo.primary.collName}`;
- if (headings.length > 1) {
- errorBlob += '\n\n';
- }
- errorBlob += heading;
- errorBlob += `: ${tojson(mismatchInfo)}`;
-
- if (diff.docsWithDifferentContents.length > 0) {
- errorBlob +=
- '\nThe following documents have different contents on the primary and' +
- ' secondary:';
- for (let {
- primary, secondary
- } of diff.docsWithDifferentContents) {
- errorBlob += `\n primary: ${tojsononeline(primary)}`;
- errorBlob += `\n secondary: ${tojsononeline(secondary)}`;
- }
- } else {
- errorBlob += '\nNo documents have different contents on the primary and secondary';
- }
+ headings.push(heading);
- if (diff.docsMissingOnPrimary.length > 0) {
- errorBlob += "\nThe following documents aren't present on the primary:";
- for (let doc of diff.docsMissingOnPrimary) {
- errorBlob += `\n ${tojsononeline(doc)}`;
- }
- } else {
- errorBlob += '\nNo documents are missing from the primary';
+ if (headings.length > 1) {
+ errorBlob += '\n\n';
+ }
+ errorBlob += heading;
+ errorBlob += `: ${tojson(mismatchInfo)}`;
+
+ if (diff.docsWithDifferentContents.length > 0) {
+ errorBlob += '\nThe following documents have different contents on the primary and' +
+ ' secondary:';
+ for (let {primary, secondary} of diff.docsWithDifferentContents) {
+ errorBlob += `\n primary: ${tojsononeline(primary)}`;
+ errorBlob += `\n secondary: ${tojsononeline(secondary)}`;
}
+ } else {
+ errorBlob += '\nNo documents have different contents on the primary and secondary';
+ }
- if (diff.docsMissingOnSecondary.length > 0) {
- errorBlob += "\nThe following documents aren't present on the secondary:";
- for (let doc of diff.docsMissingOnSecondary) {
- errorBlob += `\n ${tojsononeline(doc)}`;
- }
- } else {
- errorBlob += '\nNo documents are missing from the secondary';
+ if (diff.docsMissingOnPrimary.length > 0) {
+ errorBlob += "\nThe following documents aren't present on the primary:";
+ for (let doc of diff.docsMissingOnPrimary) {
+ errorBlob += `\n ${tojsononeline(doc)}`;
}
+ } else {
+ errorBlob += '\nNo documents are missing from the primary';
}
- if (headings.length > 0) {
- for (let session of sessions) {
- const query = {};
- const limit = 100;
- rst.dumpOplog(session.getClient(), query, limit);
+ if (diff.docsMissingOnSecondary.length > 0) {
+ errorBlob += "\nThe following documents aren't present on the secondary:";
+ for (let doc of diff.docsMissingOnSecondary) {
+ errorBlob += `\n ${tojsononeline(doc)}`;
}
+ } else {
+ errorBlob += '\nNo documents are missing from the secondary';
+ }
+ }
- print(errorBlob);
- return {
- ok: 0,
- hosts: hosts,
- error: `dbhash mismatch (search for the following headings): ${tojson(headings)}`
- };
+ if (headings.length > 0) {
+ for (let session of sessions) {
+ const query = {};
+ const limit = 100;
+ rst.dumpOplog(session.getClient(), query, limit);
}
- return {ok: 1};
+ print(errorBlob);
+ return {
+ ok: 0,
+ hosts: hosts,
+ error: `dbhash mismatch (search for the following headings): ${tojson(headings)}`
+ };
}
- if (topology.type === Topology.kReplicaSet) {
- let res = checkReplDbhashBackgroundThread(topology.nodes);
- assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
- } else if (topology.type === Topology.kShardedCluster) {
- const threads = [];
- try {
- if (topology.configsvr.nodes.length > 1) {
- const thread =
- new ScopedThread(checkReplDbhashBackgroundThread, topology.configsvr.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for 1-node CSRS: ' +
- tojsononeline(topology.configsvr));
- }
+ return {ok: 1};
+}
+
+if (topology.type === Topology.kReplicaSet) {
+ let res = checkReplDbhashBackgroundThread(topology.nodes);
+ assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
+} else if (topology.type === Topology.kShardedCluster) {
+ const threads = [];
+ try {
+ if (topology.configsvr.nodes.length > 1) {
+ const thread =
+ new ScopedThread(checkReplDbhashBackgroundThread, topology.configsvr.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for 1-node CSRS: ' +
+ tojsononeline(topology.configsvr));
+ }
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
- if (shard.type === Topology.kStandalone) {
- print('Skipping data consistency checks for stand-alone shard ' + shardName +
- ": " + tojsononeline(shard));
- continue;
- }
+ if (shard.type === Topology.kStandalone) {
+ print('Skipping data consistency checks for stand-alone shard ' + shardName + ": " +
+ tojsononeline(shard));
+ continue;
+ }
- if (shard.type !== Topology.kReplicaSet) {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
+ if (shard.type !== Topology.kReplicaSet) {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+ }
- if (shard.nodes.length > 1) {
- const thread = new ScopedThread(checkReplDbhashBackgroundThread, shard.nodes);
- threads.push(thread);
- thread.start();
- } else {
- print('Skipping data consistency checks for stand-alone shard ' + shardName +
- ": " + tojsononeline(shard));
- }
+ if (shard.nodes.length > 1) {
+ const thread = new ScopedThread(checkReplDbhashBackgroundThread, shard.nodes);
+ threads.push(thread);
+ thread.start();
+ } else {
+ print('Skipping data consistency checks for stand-alone shard ' + shardName + ": " +
+ tojsononeline(shard));
}
- } finally {
- // Wait for each thread to finish. Throw an error if any thread fails.
- let exception;
- const returnData = threads.map(thread => {
- try {
- thread.join();
- return thread.returnData();
- } catch (e) {
- if (!exception) {
- exception = e;
- }
+ }
+ } finally {
+ // Wait for each thread to finish. Throw an error if any thread fails.
+ let exception;
+ const returnData = threads.map(thread => {
+ try {
+ thread.join();
+ return thread.returnData();
+ } catch (e) {
+ if (!exception) {
+ exception = e;
}
- });
- if (exception) {
- throw exception;
}
-
- returnData.forEach(res => {
- assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
- });
+ });
+ if (exception) {
+ throw exception;
}
- } else {
- throw new Error('Unsupported topology configuration: ' + tojson(topology));
+
+ returnData.forEach(res => {
+ assert.commandWorked(res, () => 'data consistency checks failed: ' + tojson(res));
+ });
}
+} else {
+ throw new Error('Unsupported topology configuration: ' + tojson(topology));
+}
})();
diff --git a/jstests/hooks/run_check_repl_oplogs.js b/jstests/hooks/run_check_repl_oplogs.js
index 40fe76ab4ea..95a03105d0a 100644
--- a/jstests/hooks/run_check_repl_oplogs.js
+++ b/jstests/hooks/run_check_repl_oplogs.js
@@ -3,36 +3,36 @@
'use strict';
(function() {
- var startTime = Date.now();
- assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
-
- let runCheckOnReplSet = function(db) {
- let primaryInfo = db.isMaster();
-
- assert(primaryInfo.ismaster,
- 'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
-
- let testFixture = new ReplSetTest(db.getMongo().host);
- testFixture.checkOplogs();
- };
-
- if (db.getMongo().isMongos()) {
- let configDB = db.getSiblingDB('config');
-
- // Run check on every shard.
- configDB.shards.find().forEach(shardEntry => {
- let newConn = new Mongo(shardEntry.host);
- runCheckOnReplSet(newConn.getDB('test'));
- });
-
- // Run check on config server.
- let cmdLineOpts = db.adminCommand({getCmdLineOpts: 1});
- let configConn = new Mongo(cmdLineOpts.parsed.sharding.configDB);
- runCheckOnReplSet(configConn.getDB('test'));
- } else {
- runCheckOnReplSet(db);
- }
-
- var totalTime = Date.now() - startTime;
- print('Finished consistency oplog checks of cluster in ' + totalTime + ' ms.');
+var startTime = Date.now();
+assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
+
+let runCheckOnReplSet = function(db) {
+ let primaryInfo = db.isMaster();
+
+ assert(primaryInfo.ismaster,
+ 'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
+
+ let testFixture = new ReplSetTest(db.getMongo().host);
+ testFixture.checkOplogs();
+};
+
+if (db.getMongo().isMongos()) {
+ let configDB = db.getSiblingDB('config');
+
+ // Run check on every shard.
+ configDB.shards.find().forEach(shardEntry => {
+ let newConn = new Mongo(shardEntry.host);
+ runCheckOnReplSet(newConn.getDB('test'));
+ });
+
+ // Run check on config server.
+ let cmdLineOpts = db.adminCommand({getCmdLineOpts: 1});
+ let configConn = new Mongo(cmdLineOpts.parsed.sharding.configDB);
+ runCheckOnReplSet(configConn.getDB('test'));
+} else {
+ runCheckOnReplSet(db);
+}
+
+var totalTime = Date.now() - startTime;
+print('Finished consistency oplog checks of cluster in ' + totalTime + ' ms.');
})();
diff --git a/jstests/hooks/run_initial_sync_node_validation.js b/jstests/hooks/run_initial_sync_node_validation.js
index c0a9cc362a1..b624267f28a 100644
--- a/jstests/hooks/run_initial_sync_node_validation.js
+++ b/jstests/hooks/run_initial_sync_node_validation.js
@@ -3,48 +3,47 @@
'use strict';
(function() {
- var startTime = Date.now();
-
- var primaryInfo = db.isMaster();
- assert(primaryInfo.ismaster,
- 'shell is not connected to the primary node: ' + tojson(primaryInfo));
-
- var cmdLineOpts = db.adminCommand('getCmdLineOpts');
- assert.commandWorked(cmdLineOpts);
-
- // The initial sync hooks only work for replica sets.
- var rst = new ReplSetTest(db.getMongo().host);
-
- // Call getPrimary to populate rst with information about the nodes.
- var primary = rst.getPrimary();
- assert(primary, 'calling getPrimary() failed');
-
- // Find the hidden node.
- var hiddenNode;
- for (var secondary of rst._slaves) {
- var isMasterRes = secondary.getDB('admin').isMaster();
- if (isMasterRes.hidden) {
- hiddenNode = secondary;
- break;
- }
+var startTime = Date.now();
+
+var primaryInfo = db.isMaster();
+assert(primaryInfo.ismaster, 'shell is not connected to the primary node: ' + tojson(primaryInfo));
+
+var cmdLineOpts = db.adminCommand('getCmdLineOpts');
+assert.commandWorked(cmdLineOpts);
+
+// The initial sync hooks only work for replica sets.
+var rst = new ReplSetTest(db.getMongo().host);
+
+// Call getPrimary to populate rst with information about the nodes.
+var primary = rst.getPrimary();
+assert(primary, 'calling getPrimary() failed');
+
+// Find the hidden node.
+var hiddenNode;
+for (var secondary of rst._slaves) {
+ var isMasterRes = secondary.getDB('admin').isMaster();
+ if (isMasterRes.hidden) {
+ hiddenNode = secondary;
+ break;
}
+}
- assert(hiddenNode, 'No hidden initial sync node was found in the replica set');
+assert(hiddenNode, 'No hidden initial sync node was found in the replica set');
- // Confirm that the hidden node is in SECONDARY state.
- var res = assert.commandWorked(hiddenNode.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.myState, ReplSetTest.State.SECONDARY, tojson(res));
+// Confirm that the hidden node is in SECONDARY state.
+var res = assert.commandWorked(hiddenNode.adminCommand({replSetGetStatus: 1}));
+assert.eq(res.myState, ReplSetTest.State.SECONDARY, tojson(res));
- /* The checkReplicatedDataHashes call waits until all operations have replicated to and
- have been applied on the secondaries, so we run the validation script after it
- to ensure we're validating the entire contents of the collection */
+/* The checkReplicatedDataHashes call waits until all operations have replicated to and
+ have been applied on the secondaries, so we run the validation script after it
+ to ensure we're validating the entire contents of the collection */
- // For checkDBHashes
- const excludedDBs = jsTest.options().excludedDBsFromDBHash;
- rst.checkReplicatedDataHashes(undefined, excludedDBs);
+// For checkDBHashes
+const excludedDBs = jsTest.options().excludedDBsFromDBHash;
+rst.checkReplicatedDataHashes(undefined, excludedDBs);
- load('jstests/hooks/run_validate_collections.js');
+load('jstests/hooks/run_validate_collections.js');
- var totalTime = Date.now() - startTime;
- print('Finished consistency checks of initial sync node in ' + totalTime + ' ms.');
+var totalTime = Date.now() - startTime;
+print('Finished consistency checks of initial sync node in ' + totalTime + ' ms.');
})();
diff --git a/jstests/hooks/run_validate_collections.js b/jstests/hooks/run_validate_collections.js
index 171e3cd7c00..eeabba7e10e 100644
--- a/jstests/hooks/run_validate_collections.js
+++ b/jstests/hooks/run_validate_collections.js
@@ -3,41 +3,40 @@
'use strict';
(function() {
- load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
- load('jstests/hooks/validate_collections.js'); // For CollectionValidator.
-
- assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
- const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
-
- const hostList = [];
- let setFCVHost;
-
- if (topology.type === Topology.kStandalone) {
- hostList.push(topology.mongod);
- setFCVHost = topology.mongod;
- } else if (topology.type === Topology.kReplicaSet) {
- hostList.push(...topology.nodes);
- setFCVHost = topology.primary;
- } else if (topology.type === Topology.kShardedCluster) {
- hostList.push(...topology.configsvr.nodes);
-
- for (let shardName of Object.keys(topology.shards)) {
- const shard = topology.shards[shardName];
-
- if (shard.type === Topology.kStandalone) {
- hostList.push(shard.mongod);
- } else if (shard.type === Topology.kReplicaSet) {
- hostList.push(...shard.nodes);
- } else {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
- }
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/hooks/validate_collections.js'); // For CollectionValidator.
+
+assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
+const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
+
+const hostList = [];
+let setFCVHost;
+
+if (topology.type === Topology.kStandalone) {
+ hostList.push(topology.mongod);
+ setFCVHost = topology.mongod;
+} else if (topology.type === Topology.kReplicaSet) {
+ hostList.push(...topology.nodes);
+ setFCVHost = topology.primary;
+} else if (topology.type === Topology.kShardedCluster) {
+ hostList.push(...topology.configsvr.nodes);
+
+ for (let shardName of Object.keys(topology.shards)) {
+ const shard = topology.shards[shardName];
+
+ if (shard.type === Topology.kStandalone) {
+ hostList.push(shard.mongod);
+ } else if (shard.type === Topology.kReplicaSet) {
+ hostList.push(...shard.nodes);
+ } else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
}
- // Any of the mongos instances can be used for setting FCV.
- setFCVHost = topology.mongos.nodes[0];
- } else {
- throw new Error('Unrecognized topology format: ' + tojson(topology));
}
+ // Any of the mongos instances can be used for setting FCV.
+ setFCVHost = topology.mongos.nodes[0];
+} else {
+ throw new Error('Unrecognized topology format: ' + tojson(topology));
+}
- new CollectionValidator().validateNodes(hostList, setFCVHost);
-
+new CollectionValidator().validateNodes(hostList, setFCVHost);
})();
diff --git a/jstests/hooks/validate_collections.js b/jstests/hooks/validate_collections.js
index 5bfd118dcb5..a5aa67eb72e 100644
--- a/jstests/hooks/validate_collections.js
+++ b/jstests/hooks/validate_collections.js
@@ -73,8 +73,8 @@ function CollectionValidator() {
continue;
}
const host = db.getMongo().host;
- print('Collection validation failed on host ' + host + ' with response: ' +
- tojson(res));
+ print('Collection validation failed on host ' + host +
+ ' with response: ' + tojson(res));
dumpCollection(coll, 100);
full_res.failed_res.push(res);
full_res.ok = 0;