summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHaley Connelly <haley.connelly@mongodb.com>2020-04-01 14:28:15 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-04-22 17:44:37 +0000
commit93f89d90ff3829181f9103f93780f32264dff929 (patch)
treeb5b490f5623f5ff9565fb9b80150d12406c8f14e
parentdda2fb45cbf624c9270f8fad7f3c5c5a2f0834eb (diff)
downloadmongo-93f89d90ff3829181f9103f93780f32264dff929.tar.gz
SERVER-42632 Disable pinger threads that reach out to config server to make integration tests pass
(cherry picked from commit fa8d794f88daadeb1992b809c0309da2fb362d03)
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime_noop_write.js29
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp9
-rw-r--r--src/mongo/s/sharding_uptime_reporter.cpp7
3 files changed, 44 insertions, 1 deletion
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index c065ae258aa..080a8871289 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -5,6 +5,7 @@
(function() {
"use strict";
load("jstests/replsets/rslib.js");
+load("jstests/libs/fail_point_util.js");
// Skip this test if running with --nojournal and WiredTiger.
if (jsTest.options().noJournal &&
@@ -23,7 +24,33 @@ if (!assert.commandWorked(conn.getDB("test").serverStatus())
}
MongoRunner.stopMongod(conn);
-const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+// On the config server the lastApplied optime can go past the atClusterTime timestamp due to pings
+// made on collection config.mongos or config.lockping by the distributed lock pinger thread and
+// sharding uptime reporter thread. Hence, it will not write the no-op oplog entry on the config
+// server as part of waiting for read concern.
+// For more deterministic testing of no-op writes to the oplog, disable pinger threads from reaching
+// out to the config server.
+const failpointParams = {
+ setParameter: {"failpoint.disableReplSetDistLockManager": "{mode: 'alwaysOn'}"}
+};
+
+// The ShardingUptimeReporter only exists on mongos.
+const mongosFailpointParams = {
+ setParameter: {
+ "failpoint.disableReplSetDistLockManager": "{mode: 'alwaysOn'}",
+ "failpoint.disableShardingUptimeReporterPeriodicThread": "{mode: 'alwaysOn'}"
+ }
+};
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 2},
+ other: {
+ configOptions: failpointParams,
+ rsOptions: failpointParams,
+ mongosOptions: mongosFailpointParams,
+ }
+});
// Create database "test0" on shard 0.
const testDB0 = st.s.getDB("test0");
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 09745931f44..0a8c8ceffcd 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -62,6 +62,8 @@ using std::unique_ptr;
namespace {
+MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager);
+
// How many times to retry acquiring the lock after the first attempt fails
const int kMaxNumLockAcquireRetries = 2;
@@ -137,6 +139,13 @@ void ReplSetDistLockManager::doTask() {
Client::initThread("replSetDistLockPinger");
while (!isShutDown()) {
+ if (MONGO_unlikely(disableReplSetDistLockManager.shouldFail())) {
+ LOGV2(426321,
+ "The distributed lock ping thread is disabled for testing",
+ "processId"_attr = _processID,
+ "pingInterval"_attr = _pingInterval);
+ return;
+ }
{
auto opCtx = cc().makeOperationContext();
auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index 39d03689ade..9745ddd79fb 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -50,6 +50,8 @@
namespace mongo {
namespace {
+MONGO_FAIL_POINT_DEFINE(disableShardingUptimeReporterPeriodicThread);
+
const Seconds kUptimeReportInterval(10);
std::string constructInstanceIdString(const std::string& hostName) {
@@ -110,6 +112,11 @@ void ShardingUptimeReporter::startPeriodicThread() {
const Timer upTimeTimer;
while (!globalInShutdownDeprecated()) {
+ if (MONGO_unlikely(disableShardingUptimeReporterPeriodicThread.shouldFail())) {
+ LOGV2(426322,
+ "The sharding uptime reporter periodic thread is disabled for testing");
+ return;
+ }
{
auto opCtx = cc().makeOperationContext();
reportStatus(opCtx.get(), instanceId, hostName, upTimeTimer);