author     Davis Haupt <davis.haupt@mongodb.com>             2021-12-15 21:26:52 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-12-15 22:04:48 +0000
commit     b884feb946d3a991b37e5ebeb5944eb8c5edf361 (patch)
tree       2940f90b98cde4bdc54c9a631521f82d6c7c0c6e /jstests/sharding/health_monitor/progress_monitor.js
parent     10fbdd7be2fbe0dad422762a3ccd0eab30f45bc3 (diff)
SERVER-61220 Integration test for progress monitor
Diffstat (limited to 'jstests/sharding/health_monitor/progress_monitor.js')
-rw-r--r--  jstests/sharding/health_monitor/progress_monitor.js  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/jstests/sharding/health_monitor/progress_monitor.js b/jstests/sharding/health_monitor/progress_monitor.js
new file mode 100644
index 00000000000..fc243da4401
--- /dev/null
+++ b/jstests/sharding/health_monitor/progress_monitor.js
@@ -0,0 +1,50 @@
+const PROGRESS_TIMEOUT_SECONDS = 5;
+const CHECK_PING_SECONDS = 1;
+(function() {
+'use strict';
+
+const params = {
+    setParameter: {
+        healthMonitoringIntensities: tojson({test: "non-critical", ldap: "off", dns: "off"}),
+        healthMonitoringIntervals: tojson({test: 500}),
+        progressMonitor: tojson({deadline: PROGRESS_TIMEOUT_SECONDS}),
+        featureFlagHealthMonitoring: true
+    }
+};
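+// Only the test health observer is enabled (ldap and dns are off), and the progress monitor
+// is given a short deadline so that a health check which stops making progress is detected
+// within PROGRESS_TIMEOUT_SECONDS.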
+let st = new ShardingTest({
+    mongos: [params, params],
+    shards: 1,
+});
+// After cluster startup, make sure both mongos processes are available.
+assert.commandWorked(st.s0.adminCommand({"ping": 1}));
+assert.commandWorked(st.s1.adminCommand({"ping": 1}));
+
+// Set the failpoint on one of the mongos processes to pause its health checks.
+assert.commandWorked(
+    st.s1.adminCommand({"configureFailPoint": 'hangTestHealthObserver', "mode": "alwaysOn"}));
+sleep(CHECK_PING_SECONDS * 1000);
+// Make sure the failpoint on its own doesn't bring down the server.
+assert.commandWorked(st.s1.adminCommand({"ping": 1}));
+// Wait for the progress monitor timeout to elapse.
+sleep(PROGRESS_TIMEOUT_SECONDS * 1000);
+
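+// By now the stalled health check should have exceeded the progress monitor deadline, so s1
+// is expected to shut itself down while s0 keeps serving requests.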
+assert.soon(() => {
+    try {
+        assert.commandWorked(st.s0.adminCommand({"ping": 1}));  // Ensure s0 is unaffected.
+        st.s1.adminCommand(
+            {"ping": 1});  // This should throw an error because s1 is no longer reachable.
+        assert(false, "ping command to s1 should fail.");
+    } catch (e) {
+        // Relying on the error's string message might seem brittle, but the same check
+        // appears in the implementation of runCommand().
+        if (e.message.indexOf("network error") >= 0) {
+            return true;
+        } else {
+            throw e;
+        }
+    }
+}, "Pinging faulty mongos should fail with network error.", PROGRESS_TIMEOUT_SECONDS * 1000);
+// Don't validate exit codes, since a mongos will exit on its own with a non-zero exit code.
+
+st.stop({skipValidatingExitCode: true});
+})();