summary refs log tree commit diff
path: root/src/mongo/executor
diff options
context:
space:
mode:
authorBen Caimano <ben.caimano@10gen.com>2019-02-20 11:13:41 -0500
committerBen Caimano <ben.caimano@10gen.com>2019-02-20 11:32:26 -0500
commit6cdb28ab8df5dff06be82b4c46971fe5298c6f46 (patch)
treeb82034af1582f0b3ff7554a07f990c2b211ab3bb /src/mongo/executor
parent601ed1b88afe54f79e39c298cd2c578795bfc17b (diff)
downloadmongo-6cdb28ab8df5dff06be82b4c46971fe5298c6f46.tar.gz
SERVER-37412 Added LogSeverityLimiter for timed logging
Diffstat (limited to 'src/mongo/executor')
-rw-r--r--src/mongo/executor/connection_pool.cpp13
-rw-r--r--src/mongo/executor/connection_pool_tl.cpp3
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index b70bf5b57db..c0c4293e505 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -725,6 +725,12 @@ void ConnectionPool::SpecificPool::processFailure(const Status& status,
// connections
_generation++;
+ if (!_readyPool.empty() || !_processingPool.empty()) {
+ auto severity = MONGO_GET_LIMITED_SEVERITY(_hostAndPort, Seconds{1}, 0, 2);
+ LOG(severity) << "Dropping all pooled connections to " << _hostAndPort << " due to "
+ << redact(status);
+ }
+
// When a connection enters the ready pool, its timer is set to eventually refresh the
// connection. This requires a lifetime extension of the specific pool because the connection
// timer is tied to the lifetime of the connection, not the pool. That said, we can destruct
@@ -732,9 +738,6 @@ void ConnectionPool::SpecificPool::processFailure(const Status& status,
// In short, clearing the ready pool helps the SpecificPool drain.
_readyPool.clear();
- // Log something helpful
- log() << "Dropping all pooled connections to " << _hostAndPort << " due to " << status;
-
// Migrate processing connections to the dropped pool
for (auto&& x : _processingPool) {
if (_state != State::kInShutdown) {
@@ -823,6 +826,10 @@ void ConnectionPool::SpecificPool::spawnConnections(stdx::unique_lock<stdx::mute
while ((_state != State::kInShutdown) &&
(_readyPool.size() + _processingPool.size() + _checkedOutPool.size() < target()) &&
(_processingPool.size() < _parent->_options.maxConnecting)) {
+ if (_readyPool.empty() && _processingPool.empty()) {
+ auto severity = MONGO_GET_LIMITED_SEVERITY(_hostAndPort, Seconds{1}, 0, 2);
+ LOG(severity) << "Connecting to " << _hostAndPort;
+ }
OwnedConnection handle;
try {
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index f8163a2b4e3..25141e09a1e 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -212,7 +212,6 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
std::move(pf.future).getAsync(
[ this, cb = std::move(cb), anchor ](Status status) { cb(this, std::move(status)); });
- log() << "Connecting to " << _peer;
setTimeout(timeout, [this, handler, timeout] {
if (handler->done.swap(true)) {
return;
@@ -266,7 +265,7 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb) {
if (status.isOK()) {
handler->promise.emplaceValue();
} else {
- log() << "Failed to connect to " << _peer << " - " << redact(status);
+ LOG(2) << "Failed to connect to " << _peer << " - " << redact(status);
handler->promise.setError(status);
}
});