author     Jordi Olivares Provencio <jordi.olivares-provencio@mongodb.com>   2022-12-12 17:03:40 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>                  2022-12-12 17:59:23 +0000
commit     1dff382d88776c4c2e83f81cc8299dbf3cd0b313 (patch)
tree       d41e97c5f2b8d134c8d1814947d43d37af1c2534 /src/mongo/util
parent     bb7a7e20dfb1a86bb9378ea4254275ef78592d72 (diff)
SERVER-71829 Add latency distribution to ticketholder microbenchmarks
Diffstat (limited to 'src/mongo/util')
-rw-r--r--  src/mongo/util/SConscript                         1
-rw-r--r--  src/mongo/util/concurrency/ticketholder_bm.cpp   42
-rw-r--r--  src/mongo/util/latency_distribution.h           119
-rw-r--r--  src/mongo/util/latency_distribution_test.cpp     67
4 files changed, 228 insertions(+), 1 deletion(-)
diff --git a/src/mongo/util/SConscript b/src/mongo/util/SConscript
index 557b98ef165..1cec160602e 100644
--- a/src/mongo/util/SConscript
+++ b/src/mongo/util/SConscript
@@ -754,6 +754,7 @@ icuEnv.CppUnitTest(
'invalidating_lru_cache_test.cpp',
'itoa_test.cpp',
'latch_analyzer_test.cpp' if get_option('use-diagnostic-latches') == 'on' else [],
+ 'latency_distribution_test.cpp',
'lockable_adapter_test.cpp',
'log_with_sampling_test.cpp',
'lru_cache_test.cpp',
diff --git a/src/mongo/util/concurrency/ticketholder_bm.cpp b/src/mongo/util/concurrency/ticketholder_bm.cpp
index d6983df1dca..8abe59fc51e 100644
--- a/src/mongo/util/concurrency/ticketholder_bm.cpp
+++ b/src/mongo/util/concurrency/ticketholder_bm.cpp
@@ -38,7 +38,9 @@
#include "mongo/util/concurrency/priority_ticketholder.h"
#include "mongo/util/concurrency/semaphore_ticketholder.h"
#include "mongo/util/concurrency/ticketholder.h"
+#include "mongo/util/latency_distribution.h"
#include "mongo/util/tick_source_mock.h"
+#include "mongo/util/timer.h"
namespace mongo {
namespace {
@@ -93,9 +95,14 @@ template <class TicketHolderImpl, AdmissionsPriority admissionsPriority>
void BM_acquireAndRelease(benchmark::State& state) {
static std::unique_ptr<TicketHolderFixture<TicketHolderImpl>> ticketHolder;
static ServiceContext::UniqueServiceContext serviceContext;
+ static constexpr auto resolution = Microseconds{100};
+ static LatencyPercentileDistribution resultingDistribution(resolution);
+ static int numRemainingToMerge;
{
stdx::unique_lock lk(isReadyMutex);
if (state.thread_index == 0) {
+ resultingDistribution = LatencyPercentileDistribution{resolution};
+ numRemainingToMerge = state.threads;
serviceContext = ServiceContext::make();
serviceContext->setTickSource(std::make_unique<TickSourceMock<Microseconds>>());
serviceContext->registerClientObserver(std::make_unique<LockerNoopClientObserver>());
@@ -125,25 +132,58 @@ void BM_acquireAndRelease(benchmark::State& state) {
}();
TicketHolderFixture<TicketHolderImpl>* fixture = ticketHolder.get();
+ // Each thread builds the latency distribution locally to avoid synchronizing with other threads.
+ // The per-thread distributions are merged at the end instead.
+ LatencyPercentileDistribution localDistribution{resolution};
+
for (auto _ : state) {
+ Timer timer;
+ Microseconds timeForAcquire;
AdmissionContext admCtx;
admCtx.setPriority(priority);
auto opCtx = fixture->opCtxs[state.thread_index].get();
{
- auto ticket = fixture->ticketHolder->waitForTicket(opCtx, &admCtx, waitMode);
+ auto ticket =
+ fixture->ticketHolder->waitForTicketUntil(opCtx, &admCtx, Date_t::max(), waitMode);
+ timeForAcquire = timer.elapsed();
state.PauseTiming();
sleepmicros(1);
acquired++;
state.ResumeTiming();
+ // Reset the timer here so that the artificial sleep is excluded and only the time spent
+ // acquiring and releasing the ticket is measured. The release is performed by the ticket
+ // destructor.
+ timer.reset();
}
+ localDistribution.addEntry(timeForAcquire + timer.elapsed());
}
state.counters["Acquired"] = benchmark::Counter(acquired, benchmark::Counter::kIsRate);
state.counters["AcquiredPerThread"] =
benchmark::Counter(acquired, benchmark::Counter::kAvgThreadsRate);
+ // Merge all latency distributions in order to get the full view of all threads.
+ {
+ stdx::unique_lock lk(isReadyMutex);
+ resultingDistribution = resultingDistribution.mergeWith(localDistribution);
+ numRemainingToMerge--;
+ if (numRemainingToMerge > 0) {
+ isReadyCv.wait(lk, [&] { return numRemainingToMerge == 0; });
+ } else {
+ isReadyCv.notify_all();
+ }
+ }
if (state.thread_index == 0) {
ticketHolder.reset();
serviceContext.reset();
isReady = false;
+ state.counters["AcqRel50"] =
+ benchmark::Counter(resultingDistribution.getPercentile(0.5).count());
+ state.counters["AcqRel95"] =
+ benchmark::Counter(resultingDistribution.getPercentile(0.95).count());
+ state.counters["AcqRel99"] =
+ benchmark::Counter(resultingDistribution.getPercentile(0.99).count());
+ state.counters["AcqRel99.9"] =
+ benchmark::Counter(resultingDistribution.getPercentile(0.999).count());
+ state.counters["AcqRelMax"] = benchmark::Counter(resultingDistribution.getMax().count());
}
}
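
The benchmark keeps each thread's LatencyPercentileDistribution local and only touches the shared state once, under isReadyMutex, after the measurement loop; the last thread to merge wakes the rest. Below is a minimal standalone sketch of that merge-at-end pattern, using plain std:: primitives instead of the mongo:: wrappers and a simplified LocalHistogram type invented here purely for illustration:

// Sketch only: LocalHistogram and its 100us bucketing are stand-ins for
// LatencyPercentileDistribution; the thread/mutex/condvar choreography mirrors
// the benchmark above.
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

struct LocalHistogram {
    std::map<int64_t, int64_t> buckets;  // bucket index -> entry count

    void add(int64_t micros) {
        buckets[micros / 100]++;  // 100us resolution, rounded down for brevity
    }

    void mergeInto(LocalHistogram& global) const {
        for (const auto& [key, count] : buckets)
            global.buckets[key] += count;
    }
};

int main() {
    std::mutex mutex;
    std::condition_variable cv;
    LocalHistogram global;
    int remainingToMerge = 4;

    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back([&] {
            // Hot path: accumulate into a thread-local histogram, no locking.
            LocalHistogram local;
            for (int i = 0; i < 1000; ++i)
                local.add(100 * (i % 10));

            // Cold path: merge once at the end under the mutex. The last thread
            // to merge notifies the others so everyone sees the full result.
            std::unique_lock lk(mutex);
            local.mergeInto(global);
            if (--remainingToMerge > 0)
                cv.wait(lk, [&] { return remainingToMerge == 0; });
            else
                cv.notify_all();
        });
    }
    for (auto& w : workers)
        w.join();
}
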
diff --git a/src/mongo/util/latency_distribution.h b/src/mongo/util/latency_distribution.h
new file mode 100644
index 00000000000..88f76692d51
--- /dev/null
+++ b/src/mongo/util/latency_distribution.h
@@ -0,0 +1,119 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+#pragma once
+
+#include <map>
+
+#include "mongo/util/duration.h"
+
+namespace mongo {
+/**
+ * A class containing the latency distribution of the operations performed. To keep memory usage
+ * bounded, the distribution is computed over buckets of a caller-provided resolution.
+ */
+class LatencyPercentileDistribution {
+public:
+ LatencyPercentileDistribution(Microseconds resolution) : _resolution(resolution){};
+
+ // Adds the provided duration entry into the distribution
+ void addEntry(Microseconds duration) {
+ // Entries are bucketed by rounding the duration up to the next multiple of the resolution.
+ // Adding (resolution - 1) before the integer division yields ceiling division, so durations
+ // that are not exact multiples of the resolution land in the next-higher bucket.
+ auto key = (duration + _resolution - Microseconds{1}) / _resolution.count();
+ _orderedBuckets[key]++;
+ _totalEntries++;
+ }
+
+ // Merges this distribution with another one, giving the combined result of the two.
+ //
+ // This is useful in multithreaded scenarios: each thread can update a local copy and the
+ // copies can be merged at the end, keeping concurrency synchronisation to a bare minimum.
+ LatencyPercentileDistribution mergeWith(const LatencyPercentileDistribution& other) {
+ LatencyPercentileDistribution result(_resolution);
+ for (const auto& [key, count] : this->_orderedBuckets) {
+ result._orderedBuckets[key] += count;
+ }
+ for (const auto& [key, count] : other._orderedBuckets) {
+ result._orderedBuckets[key] += count;
+ }
+ result._totalEntries = this->_totalEntries + other._totalEntries;
+ return result;
+ }
+
+ // Obtains the latency at the given percentile. The returned value is an approximation that
+ // is off by at most one resolution unit.
+ Microseconds getPercentile(float percentile) const {
+ int64_t scannedEntries = 0;
+ auto targetEntries = static_cast<int64_t>(_totalEntries * percentile);
+ // The buckets are sorted by key, so we accumulate bucket counts until we reach the target
+ // number of entries.
+ auto iter = _orderedBuckets.begin();
+ Microseconds previousMicrosecondsKey{0};
+ while (scannedEntries < targetEntries && iter != _orderedBuckets.end()) {
+ auto [key, value] = *iter;
+ auto newMicrosecondsKey = key;
+ if (scannedEntries + value >= targetEntries) {
+ // Invert the bucketing to recover the actual value: keys were obtained by dividing by
+ // the resolution, so multiply by it to get the original microsecond value back.
+ auto newMicros = newMicrosecondsKey * _resolution.count();
+ auto previousMicros = previousMicrosecondsKey * _resolution.count();
+ auto interpolationRate =
+ (targetEntries - scannedEntries) / static_cast<float>(value);
+ auto differenceMicroseconds = (newMicros - previousMicros).count();
+ auto interpolatedMicros = interpolationRate * differenceMicroseconds;
+ return previousMicros + Microseconds{static_cast<int64_t>(interpolatedMicros)};
+ } else {
+ previousMicrosecondsKey = newMicrosecondsKey;
+ scannedEntries += value;
+ iter++;
+ }
+ }
+ return Microseconds{0};
+ }
+
+ Microseconds getMax() const {
+ auto end = *_orderedBuckets.rbegin();
+ return end.first * _resolution.count();
+ }
+
+ int64_t numEntries() const {
+ return _totalEntries;
+ }
+
+private:
+ Microseconds _resolution;
+ // We use a std::map as it keeps the buckets ordered by key.
+ std::map<Microseconds, int32_t> _orderedBuckets;
+ int64_t _totalEntries = 0;
+};
+} // namespace mongo
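
For a concrete sense of the arithmetic in addEntry() and getPercentile(), here is a short sketch of how the class might be exercised; it assumes the usual mongo build environment, and the function name is illustrative only:

#include "mongo/util/latency_distribution.h"

// Hypothetical helper, only to walk through the bucketing and interpolation.
void latencyDistributionExample() {
    mongo::LatencyPercentileDistribution dist{mongo::Microseconds{100}};

    // addEntry(250us): key = (250 + 100 - 1) / 100 = 3, i.e. the 300us bucket.
    dist.addEntry(mongo::Microseconds{250});
    // addEntry(100us): key = (100 + 100 - 1) / 100 = 1, i.e. the 100us bucket.
    dist.addEntry(mongo::Microseconds{100});

    // getPercentile(0.5): target = 2 * 0.5 = 1 entry. The first bucket already
    // covers it, so the result interpolates between 0us and 100us:
    //     0us + (1 - 0) / 1 * (100us - 0us) = 100us.
    auto p50 = dist.getPercentile(0.5f);  // Microseconds{100}

    // getMax(): highest bucket key (3) times the resolution -> 300us.
    auto max = dist.getMax();  // Microseconds{300}

    (void)p50;
    (void)max;
}
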
diff --git a/src/mongo/util/latency_distribution_test.cpp b/src/mongo/util/latency_distribution_test.cpp
new file mode 100644
index 00000000000..cda6e6f54c6
--- /dev/null
+++ b/src/mongo/util/latency_distribution_test.cpp
@@ -0,0 +1,67 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/unittest/unittest.h"
+
+#include "mongo/util/latency_distribution.h"
+
+namespace mongo {
+
+TEST(LatencyDistributionTest, WorksWithInterpolation) {
+ constexpr auto resolution = Microseconds{100};
+ LatencyPercentileDistribution distribution{resolution};
+
+ for (int i = 0; i < 100; i++) {
+ distribution.addEntry(resolution);
+ }
+
+ ASSERT_EQ(distribution.getPercentile(0), Microseconds{0});
+ ASSERT_EQ(distribution.getPercentile(0.75), Microseconds{75});
+}
+
+TEST(LatencyDistributionTest, MergesWorkCorrectly) {
+ constexpr auto resolution = Microseconds{100};
+ LatencyPercentileDistribution distribution1{resolution};
+ LatencyPercentileDistribution distribution2{resolution};
+
+ for (int i = 0; i < 100; i++) {
+ distribution1.addEntry(resolution);
+ distribution2.addEntry(resolution * 2);
+ }
+
+ auto merged = distribution1.mergeWith(distribution2);
+ ASSERT_EQ(merged.numEntries(), 200);
+ ASSERT_EQ(merged.getMax(), Microseconds{200});
+ ASSERT_EQ(merged.getPercentile(0.6), Microseconds{120});
+ ASSERT_EQ(merged.getPercentile(0.5), Microseconds{100});
+}
+
+} // namespace mongo
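
For reference, the percentile expectations in MergesWorkCorrectly follow directly from the interpolation in getPercentile(); the arithmetic, using only the values in the test:

    Merged buckets: {100us bucket: 100 entries, 200us bucket: 100 entries}, 200 entries total.
    getPercentile(0.6): target = 200 * 0.6 = 120 entries. The 100us bucket covers the first 100,
    so the remaining 20 fall in the 200us bucket:
        100us + (120 - 100) / 100 * (200us - 100us) = 120us.
    getPercentile(0.5): target = 100 entries, exactly the 100us bucket's count, giving
        0us + (100 - 0) / 100 * (100us - 0us) = 100us.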