diff options
author | Haley Connelly <haley.connelly@mongodb.com> | 2020-04-01 14:28:15 -0400 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2020-04-06 19:07:42 +0000 |
commit | fa8d794f88daadeb1992b809c0309da2fb362d03 (patch) | |
tree | 383dcdb76743f6050463155f5056972c6fc77b47 | |
parent | 4d82d10588dbeca498e46d51a36b6efdf8379af1 (diff) | |
download | mongo-fa8d794f88daadeb1992b809c0309da2fb362d03.tar.gz |
SERVER-42632 Disable pinger threads that reach out to config server to make integration tests pass
-rw-r--r-- | jstests/noPassthrough/readConcern_atClusterTime_noop_write.js | 29 | ||||
-rw-r--r-- | src/mongo/s/catalog/replset_dist_lock_manager.cpp | 9 | ||||
-rw-r--r-- | src/mongo/s/sharding_uptime_reporter.cpp | 7 |
3 files changed, 44 insertions, 1 deletion
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js index c065ae258aa..080a8871289 100644 --- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js +++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js @@ -5,6 +5,7 @@ (function() { "use strict"; load("jstests/replsets/rslib.js"); +load("jstests/libs/fail_point_util.js"); // Skip this test if running with --nojournal and WiredTiger. if (jsTest.options().noJournal && @@ -23,7 +24,33 @@ if (!assert.commandWorked(conn.getDB("test").serverStatus()) } MongoRunner.stopMongod(conn); -const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); +// On the config server the lastApplied optime can go past the atClusterTime timestamp due to pings +// made on collection config.mongos or config.lockping by the distributed lock pinger thread and +// sharding uptime reporter thread. Hence, it will not write the no-op oplog entry on the config +// server as part of waiting for read concern. +// For more deterministic testing of no-op writes to the oplog, disable pinger threads from reaching +// out to the config server. +const failpointParams = { + setParameter: {"failpoint.disableReplSetDistLockManager": "{mode: 'alwaysOn'}"} +}; + +// The ShardingUptimeReporter only exists on mongos. +const mongosFailpointParams = { + setParameter: { + "failpoint.disableReplSetDistLockManager": "{mode: 'alwaysOn'}", + "failpoint.disableShardingUptimeReporterPeriodicThread": "{mode: 'alwaysOn'}" + } +}; + +const st = new ShardingTest({ + shards: 2, + rs: {nodes: 2}, + other: { + configOptions: failpointParams, + rsOptions: failpointParams, + mongosOptions: mongosFailpointParams, + } +}); // Create database "test0" on shard 0. 
const testDB0 = st.s.getDB("test0"); diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp index 6bfda8f7158..ac9759c4c99 100644 --- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp +++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp @@ -61,6 +61,8 @@ using std::unique_ptr; namespace { +MONGO_FAIL_POINT_DEFINE(disableReplSetDistLockManager); + // How many times to retry acquiring the lock after the first attempt fails const int kMaxNumLockAcquireRetries = 2; @@ -136,6 +138,13 @@ void ReplSetDistLockManager::doTask() { Client::initThread("replSetDistLockPinger"); while (!isShutDown()) { + if (MONGO_unlikely(disableReplSetDistLockManager.shouldFail())) { + LOGV2(426321, + "The distributed lock ping thread is disabled for testing", + "processId"_attr = _processID, + "pingInterval"_attr = _pingInterval); + return; + } { auto opCtx = cc().makeOperationContext(); auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now()); diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp index 39d03689ade..9745ddd79fb 100644 --- a/src/mongo/s/sharding_uptime_reporter.cpp +++ b/src/mongo/s/sharding_uptime_reporter.cpp @@ -50,6 +50,8 @@ namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(disableShardingUptimeReporterPeriodicThread); + const Seconds kUptimeReportInterval(10); std::string constructInstanceIdString(const std::string& hostName) { @@ -110,6 +112,11 @@ void ShardingUptimeReporter::startPeriodicThread() { const Timer upTimeTimer; while (!globalInShutdownDeprecated()) { + if (MONGO_unlikely(disableShardingUptimeReporterPeriodicThread.shouldFail())) { + LOGV2(426322, + "The sharding uptime reporter periodic thread is disabled for testing"); + return; + } { auto opCtx = cc().makeOperationContext(); reportStatus(opCtx.get(), instanceId, hostName, upTimeTimer); |