Diffstat (limited to 'jstests/noPassthrough/data_consistency_checks.js')
-rw-r--r--  jstests/noPassthrough/data_consistency_checks.js  355
1 file changed, 176 insertions(+), 179 deletions(-)
diff --git a/jstests/noPassthrough/data_consistency_checks.js b/jstests/noPassthrough/data_consistency_checks.js
index dcddefaf882..94c44f3e49b 100644
--- a/jstests/noPassthrough/data_consistency_checks.js
+++ b/jstests/noPassthrough/data_consistency_checks.js
@@ -9,193 +9,190 @@
var db;
(function() {
- "use strict";
-
- // We skip doing the data consistency checks while terminating the cluster because they conflict
- // with the counts of the number of times the "dbhash" and "validate" commands are run.
- TestData.skipCollectionAndIndexValidation = true;
- TestData.skipCheckDBHashes = true;
-
- function makePatternForDBHash(dbName) {
- return new RegExp("COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
- "g");
+"use strict";
+
+// We skip doing the data consistency checks while terminating the cluster because they conflict
+// with the counts of the number of times the "dbhash" and "validate" commands are run.
+TestData.skipCollectionAndIndexValidation = true;
+TestData.skipCheckDBHashes = true;
+
+function makePatternForDBHash(dbName) {
+ return new RegExp(
+ "COMMAND.*command " + dbName + "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
+ "g");
+}
+
+function makePatternForValidate(dbName, collName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
+ collName + "\"",
+ "g");
+}
+
+function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
}
-
- function makePatternForValidate(dbName, collName) {
- return new RegExp(
- "COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
- "\"",
- "g");
+ return numMatches;
+}
+
+function runDataConsistencyChecks(testCase) {
+ db = testCase.conn.getDB("test");
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_check_repl_dbhash.js");
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
}
+}
+
+(function testReplicaSetWithVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+ rst.startSet();
+ rst.initiateWithNodeZeroAsPrimary();
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
+
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- function countMatches(pattern, output) {
- assert(pattern.global, "the 'g' flag must be used to find all matches");
-
- let numMatches = 0;
- while (pattern.exec(output) !== null) {
- ++numMatches;
+(function testReplicaSetWithNonVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- return numMatches;
+ });
+ rst.startSet();
+
+ const replSetConfig = rst.getReplSetConfig();
+ for (let i = 1; i < numNodes; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
}
+ rst.initiate(replSetConfig);
- function runDataConsistencyChecks(testCase) {
- db = testCase.conn.getDB("test");
- try {
- clearRawMongoProgramOutput();
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
- load("jstests/hooks/run_check_repl_dbhash.js");
- load("jstests/hooks/run_validate_collections.js");
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
- // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
- // will return all of their output.
- testCase.teardown();
- return rawMongoProgramOutput();
- } finally {
- db = undefined;
- }
- }
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- (function testReplicaSetWithVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
- rst.initiateWithNodeZeroAsPrimary();
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testReplicaSetWithNonVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
-
- const replSetConfig = rst.getReplSetConfig();
- for (let i = 1; i < numNodes; ++i) {
- replSetConfig.members[i].priority = 0;
- replSetConfig.members[i].votes = 0;
+(function testShardedClusterWithOneNodeCSRS() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1
+ });
+
+ // We shard a collection in order to guarantee that at least one collection on the "config"
+ // database exists for when we go to run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ let pattern = makePatternForDBHash("config");
+ assert.eq(0,
+ countMatches(pattern, output),
+ "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+})();
+
+(function testShardedCluster() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 3,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1,
+ rs: {nodes: 2},
+ rsOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- rst.initiate(replSetConfig);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testShardedClusterWithOneNodeCSRS() {
- const st = new ShardingTest({
- mongos: 1,
- config: 1,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1
- });
-
- // We shard a collection in order to guarantee that at least one collection on the "config"
- // database exists for when we go to run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- let pattern = makePatternForDBHash("config");
- assert.eq(0,
- countMatches(pattern, output),
- "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(1,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
- })();
-
- (function testShardedCluster() {
- const st = new ShardingTest({
- mongos: 1,
- config: 3,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1,
- rs: {nodes: 2},
- rsOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
-
- // We shard a collection in order to guarantee that at least one collection on the "config"
- // database exists for when we go to run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
- // the replica set shard.
- assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- // The "config" database exists on both the CSRS and the replica set shards due to the
- // "config.transactions" collection.
- let pattern = makePatternForDBHash("config");
- assert.eq(5,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each CSRS node and each replica set shard node in the log output");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(3,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
-
- pattern = makePatternForDBHash("test");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
- })();
+ });
+
+ // We shard a collection in order to guarantee that at least one collection on the "config"
+ // database exists for when we go to run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
+ // the replica set shard.
+ assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ // The "config" database exists on both the CSRS and the replica set shards due to the
+ // "config.transactions" collection.
+ let pattern = makePatternForDBHash("config");
+ assert.eq(5,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each CSRS node and each replica set shard node in the log output");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(3,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
+
+ pattern = makePatternForDBHash("test");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+})();
})();
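
For readers skimming the patch, the log-scraping contract that the helpers at the top rely on is worth spelling out: the patterns scan the combined rawMongoProgramOutput() for the command log lines mongod emits at logComponentVerbosity {command: 1}. The following standalone sketch mirrors makePatternForDBHash() and countMatches() against fabricated log lines; the sample output is illustrative only, not captured from a real server run.

// Standalone sketch of the matching logic in the patch. The sample log lines
// below are fabricated for illustration; real mongod output carries timestamps
// and additional fields.
const pattern = new RegExp(
    "COMMAND.*command test\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash", "g");

const sampleOutput = [
    'd20520| I COMMAND  [conn12] command test.$cmd appName: "MongoDB Shell" command: dbHash { dbHash: 1.0 }',
    'd20521| I COMMAND  [conn7] command test.$cmd appName: "MongoDB Shell" command: dbhash { dbhash: 1.0 }',
].join("\n");

// Mirrors countMatches(): with the "g" flag set, each exec() call resumes from
// pattern.lastIndex, so the loop visits every match exactly once.
let numMatches = 0;
while (pattern.exec(sampleOutput) !== null) {
    ++numMatches;
}
assert.eq(2, numMatches, "expected one match per simulated node");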
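
Similarly, the reason runDataConsistencyChecks() assigns to the global db before load()ing the hook files is that run_check_repl_dbhash.js and run_validate_collections.js locate the cluster to check through that global. A minimal sketch of the same pattern, assuming an already-running connection conn; the withGlobalDB helper is hypothetical, introduced here only for illustration.

// Hypothetical helper illustrating the global-`db` contract the shell hooks
// depend on; it is not part of the patch.
function withGlobalDB(conn, fn) {
    db = conn.getDB("test");  // the hooks read the global `db` when load()ed
    try {
        return fn();
    } finally {
        db = undefined;  // avoid leaking the connection into later tests
    }
}

// Usage mirroring the patch: load a hook while `db` points at the cluster.
// withGlobalDB(conn, () => load("jstests/hooks/run_check_repl_dbhash.js"));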