summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/core/views/views_all_commands.js1
-rw-r--r--jstests/disk/too_many_fds.js4
-rw-r--r--jstests/noPassthroughWithMongod/testing_only_commands.js1
-rw-r--r--jstests/replsets/server_status_metrics.js5
-rw-r--r--jstests/sharding/safe_secondary_reads_drop_recreate.js1
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js1
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js1
7 files changed, 12 insertions, 2 deletions
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 7038e69b794..261c570aa5f 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -318,6 +318,7 @@
invalidateUserCache: {skip: isUnrelated},
isdbgrid: {skip: isUnrelated},
isMaster: {skip: isUnrelated},
+ journalLatencyTest: {skip: isUnrelated},
killCursors: {
setup: function(conn) {
assert.writeOK(conn.collection.remove({}));
diff --git a/jstests/disk/too_many_fds.js b/jstests/disk/too_many_fds.js
index b8334bbb717..f7fa331b956 100644
--- a/jstests/disk/too_many_fds.js
+++ b/jstests/disk/too_many_fds.js
@@ -4,7 +4,7 @@
function doTest() {
var baseName = "jstests_disk_too_many_fds";
- var m = MongoRunner.runMongod();
+ var m = MongoRunner.runMongod({nssize: 1});
// Make 1026 collections, each in a separate database. On some storage engines, this may cause
// 1026 files to be created.
for (var i = 1; i < 1026; ++i) {
@@ -16,7 +16,7 @@ function doTest() {
MongoRunner.stopMongod(m);
// Ensure we can still start up with that many files.
- var m2 = MongoRunner.runMongod({dbpath: m.dbpath, restart: true, cleanData: false});
+ var m2 = MongoRunner.runMongod({dbpath: m.dbpath, nssize: 1, restart: true, cleanData: false});
assert.eq(1, m2.getDB("db1025").getCollection("coll1025").count());
MongoRunner.stopMongod(m2);
diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js
index ad142cc1ecc..55187010cc1 100644
--- a/jstests/noPassthroughWithMongod/testing_only_commands.js
+++ b/jstests/noPassthroughWithMongod/testing_only_commands.js
@@ -7,6 +7,7 @@ var testOnlyCommands = [
'configureFailPoint',
'_hashBSONElement',
'replSetTest',
+ 'journalLatencyTest',
'godinsert',
'sleep',
'cpuload',
diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js
index b6d8bf4b92d..9b805b92e4c 100644
--- a/jstests/replsets/server_status_metrics.js
+++ b/jstests/replsets/server_status_metrics.js
@@ -20,6 +20,11 @@ function testSecondaryMetrics(secondary, opCount, offset) {
assert(ss.metrics.repl.buffer.sizeBytes >= 0, "size (bytes)] missing");
assert(ss.metrics.repl.buffer.maxSizeBytes >= 0, "maxSize (bytes) missing");
+ assert(ss.metrics.repl.preload.docs.num >= 0, "preload.docs num missing");
+ assert(ss.metrics.repl.preload.docs.totalMillis >= 0, "preload.docs time missing");
+ assert(ss.metrics.repl.preload.indexes.num >= 0, "preload.indexes num missing");
+ assert(ss.metrics.repl.preload.indexes.totalMillis >= 0, "preload.indexes time missing");
+
assert(ss.metrics.repl.apply.batches.num > 0, "no batches");
assert(ss.metrics.repl.apply.batches.totalMillis >= 0, "missing batch time");
assert.eq(ss.metrics.repl.apply.ops, opCount + offset, "wrong number of applied ops");
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index 97a69868b3c..d706aa14ad0 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -195,6 +195,7 @@
invalidateUserCache: {skip: "does not return user data"},
isdbgrid: {skip: "does not return user data"},
isMaster: {skip: "does not return user data"},
+ journalLatencyTest: {skip: "does not return user data"},
killAllSessions: {skip: "does not return user data"},
killAllSessionsByPattern: {skip: "does not return user data"},
killCursors: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 90323e2783b..f7ba41f1272 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -225,6 +225,7 @@
invalidateUserCache: {skip: "does not return user data"},
isdbgrid: {skip: "does not return user data"},
isMaster: {skip: "does not return user data"},
+ journalLatencyTest: {skip: "does not return user data"},
killCursors: {skip: "does not return user data"},
killAllSessions: {skip: "does not return user data"},
killAllSessionsByPattern: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index 7aef155ecd4..df9c243ab35 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -198,6 +198,7 @@
invalidateUserCache: {skip: "does not return user data"},
isdbgrid: {skip: "does not return user data"},
isMaster: {skip: "does not return user data"},
+ journalLatencyTest: {skip: "does not return user data"},
killAllSessions: {skip: "does not return user data"},
killAllSessionsByPattern: {skip: "does not return user data"},
killCursors: {skip: "does not return user data"},