summaryrefslogtreecommitdiff
path: root/src/mongo/util
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo/util')
-rw-r--r--src/mongo/util/alarm.cpp14
-rw-r--r--src/mongo/util/alarm.h6
-rw-r--r--src/mongo/util/alarm_runner_background_thread.cpp8
-rw-r--r--src/mongo/util/alarm_runner_background_thread.h4
-rw-r--r--src/mongo/util/background.cpp28
-rw-r--r--src/mongo/util/background_job_test.cpp6
-rw-r--r--src/mongo/util/background_thread_clock_source.h6
-rw-r--r--src/mongo/util/clock_source.cpp10
-rw-r--r--src/mongo/util/clock_source.h2
-rw-r--r--src/mongo/util/clock_source_mock.cpp1
-rw-r--r--src/mongo/util/clock_source_mock.h2
-rw-r--r--src/mongo/util/concurrency/notification.h16
-rw-r--r--src/mongo/util/concurrency/spin_lock.h4
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp22
-rw-r--r--src/mongo/util/concurrency/thread_pool.h10
-rw-r--r--src/mongo/util/concurrency/thread_pool_test.cpp20
-rw-r--r--src/mongo/util/concurrency/thread_pool_test_common.cpp10
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp12
-rw-r--r--src/mongo/util/concurrency/ticketholder.h8
-rw-r--r--src/mongo/util/concurrency/with_lock.h16
-rw-r--r--src/mongo/util/concurrency/with_lock_test.cpp14
-rw-r--r--src/mongo/util/diagnostic_info.cpp118
-rw-r--r--src/mongo/util/diagnostic_info.h52
-rw-r--r--src/mongo/util/diagnostic_info_test.cpp39
-rw-r--r--src/mongo/util/exit.cpp18
-rw-r--r--src/mongo/util/fail_point.cpp4
-rw-r--r--src/mongo/util/fail_point.h4
-rw-r--r--src/mongo/util/fail_point_test.cpp15
-rw-r--r--src/mongo/util/future_impl.h12
-rw-r--r--src/mongo/util/heap_profiler.cpp6
-rw-r--r--src/mongo/util/interruptible.h8
-rw-r--r--src/mongo/util/invalidating_lru_cache.h14
-rw-r--r--src/mongo/util/lockable_adapter_test.cpp6
-rw-r--r--src/mongo/util/net/http_client_curl.cpp8
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp11
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp6
-rw-r--r--src/mongo/util/periodic_runner.h2
-rw-r--r--src/mongo/util/periodic_runner_impl.cpp12
-rw-r--r--src/mongo/util/periodic_runner_impl.h6
-rw-r--r--src/mongo/util/periodic_runner_impl_test.cpp70
-rw-r--r--src/mongo/util/processinfo.h2
-rw-r--r--src/mongo/util/producer_consumer_queue.h36
-rw-r--r--src/mongo/util/producer_consumer_queue_test.cpp24
-rw-r--r--src/mongo/util/queue.h36
-rw-r--r--src/mongo/util/signal_handlers_synchronous.cpp4
-rw-r--r--src/mongo/util/stacktrace_windows.cpp2
-rw-r--r--src/mongo/util/synchronized_value.h38
-rw-r--r--src/mongo/util/time_support.h2
-rw-r--r--src/mongo/util/uuid.cpp6
49 files changed, 394 insertions, 386 deletions
diff --git a/src/mongo/util/alarm.cpp b/src/mongo/util/alarm.cpp
index b3236a9ef5b..95a3a88dfd3 100644
--- a/src/mongo/util/alarm.cpp
+++ b/src/mongo/util/alarm.cpp
@@ -50,7 +50,7 @@ public:
return {ErrorCodes::ShutdownInProgress, "The alarm scheduler was shutdown"};
}
- stdx::unique_lock<stdx::mutex> lk(service->_mutex);
+ stdx::unique_lock<Latch> lk(service->_mutex);
if (_done) {
return {ErrorCodes::AlarmAlreadyFulfilled, "The alarm has already been canceled"};
}
@@ -80,7 +80,7 @@ AlarmSchedulerPrecise::~AlarmSchedulerPrecise() {
}
AlarmScheduler::Alarm AlarmSchedulerPrecise::alarmAt(Date_t date) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shutdown) {
Alarm ret;
ret.future = Future<void>::makeReady(
@@ -107,7 +107,7 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
std::vector<Promise<void>> toExpire;
AlarmMapIt it;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (it = _alarms.begin(); it != _alarms.end();) {
if (hook && !(*hook)(processed + 1)) {
break;
@@ -135,22 +135,22 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
}
Date_t AlarmSchedulerPrecise::nextAlarm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return (_alarms.empty()) ? Date_t::max() : _alarms.begin()->first;
}
void AlarmSchedulerPrecise::clearAllAlarms() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_clearAllAlarmsImpl(lk);
}
void AlarmSchedulerPrecise::clearAllAlarmsAndShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown = true;
_clearAllAlarmsImpl(lk);
}
-void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk) {
+void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk) {
std::vector<Promise<void>> toExpire;
for (AlarmMapIt it = _alarms.begin(); it != _alarms.end();) {
toExpire.push_back(std::move(it->second.promise));
diff --git a/src/mongo/util/alarm.h b/src/mongo/util/alarm.h
index 449284a3b21..9727a133e69 100644
--- a/src/mongo/util/alarm.h
+++ b/src/mongo/util/alarm.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -185,9 +185,9 @@ private:
using AlarmMap = std::multimap<Date_t, AlarmData>;
using AlarmMapIt = AlarmMap::iterator;
- void _clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk);
+ void _clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmSchedulerPrecise::_mutex");
bool _shutdown = false;
AlarmMap _alarms;
};
diff --git a/src/mongo/util/alarm_runner_background_thread.cpp b/src/mongo/util/alarm_runner_background_thread.cpp
index 4d22f84e87d..d0a27927246 100644
--- a/src/mongo/util/alarm_runner_background_thread.cpp
+++ b/src/mongo/util/alarm_runner_background_thread.cpp
@@ -34,13 +34,13 @@
namespace mongo {
void AlarmRunnerBackgroundThread::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running = true;
_thread = stdx::thread(&AlarmRunnerBackgroundThread::_threadRoutine, this);
}
void AlarmRunnerBackgroundThread::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_running = false;
lk.unlock();
_condVar.notify_one();
@@ -56,7 +56,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
invariant(!schedulers.empty());
const auto registerHook = [this](Date_t next, const std::shared_ptr<AlarmScheduler>& which) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (next >= _nextAlarm) {
return;
}
@@ -81,7 +81,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
}
void AlarmRunnerBackgroundThread::_threadRoutine() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_running) {
const auto clockSource = _schedulers.front()->clockSource();
const auto now = clockSource->now();
diff --git a/src/mongo/util/alarm_runner_background_thread.h b/src/mongo/util/alarm_runner_background_thread.h
index 179f6350480..5f2b5d486cc 100644
--- a/src/mongo/util/alarm_runner_background_thread.h
+++ b/src/mongo/util/alarm_runner_background_thread.h
@@ -28,7 +28,7 @@
*/
#pragma once
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/alarm.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -64,7 +64,7 @@ private:
void _threadRoutine();
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmRunnerBackgroundThread::_mutex");
stdx::condition_variable _condVar;
bool _running = false;
Date_t _nextAlarm = Date_t::max();
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index e5b40238230..7f34141dfad 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -36,8 +36,8 @@
#include <functional>
#include "mongo/config.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/concurrency/mutex.h"
@@ -80,7 +80,7 @@ private:
void _runTask(PeriodicTask* task);
// _mutex protects the _shutdownRequested flag and the _tasks vector.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicTaskRunner::_mutex");
// The condition variable is used to sleep for the interval between task
// executions, and is notified when the _shutdownRequested flag is toggled.
@@ -129,7 +129,7 @@ bool runnerDestroyed = false;
struct BackgroundJob::JobStatus {
JobStatus() : state(NotStarted) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("JobStatus::mutex");
stdx::condition_variable done;
State state;
};
@@ -154,7 +154,7 @@ void BackgroundJob::jobBody() {
{
// It is illegal to access any state owned by this BackgroundJob after leaving this
// scope, with the exception of the call to 'delete this' below.
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
_status->state = Done;
_status->done.notify_all();
}
@@ -164,7 +164,7 @@ void BackgroundJob::jobBody() {
}
void BackgroundJob::go() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
massert(17234,
str::stream() << "backgroundJob already running: " << name(),
_status->state != Running);
@@ -178,7 +178,7 @@ void BackgroundJob::go() {
}
Status BackgroundJob::cancel() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
if (_status->state == Running)
return Status(ErrorCodes::IllegalOperation, "Cannot cancel a running BackgroundJob");
@@ -194,7 +194,7 @@ Status BackgroundJob::cancel() {
bool BackgroundJob::wait(unsigned msTimeOut) {
verify(!_selfDelete); // you cannot call wait on a self-deleting job
const auto deadline = Date_t::now() + Milliseconds(msTimeOut);
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
while (_status->state != Done) {
if (msTimeOut) {
if (stdx::cv_status::timeout ==
@@ -208,12 +208,12 @@ bool BackgroundJob::wait(unsigned msTimeOut) {
}
BackgroundJob::State BackgroundJob::getState() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state;
}
bool BackgroundJob::running() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state == Running;
}
@@ -268,12 +268,12 @@ Status PeriodicTask::stopRunningPeriodicTasks(int gracePeriodMillis) {
}
void PeriodicTaskRunner::add(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_tasks.push_back(task);
}
void PeriodicTaskRunner::remove(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (size_t i = 0; i != _tasks.size(); i++) {
if (_tasks[i] == task) {
_tasks[i] = nullptr;
@@ -284,7 +284,7 @@ void PeriodicTaskRunner::remove(PeriodicTask* task) {
Status PeriodicTaskRunner::stop(int gracePeriodMillis) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_shutdownRequested = true;
_cond.notify_one();
}
@@ -300,7 +300,7 @@ void PeriodicTaskRunner::run() {
// Use a shorter cycle time in debug mode to help catch race conditions.
const Seconds waitTime(kDebugBuild ? 5 : 60);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_shutdownRequested) {
{
MONGO_IDLE_THREAD_BLOCK;
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index efca4fdbfa1..f95090d11a3 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/background.h"
@@ -114,7 +114,7 @@ TEST(BackgroundJobLifeCycle, Go) {
virtual void run() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_FALSE(_hasRun);
_hasRun = true;
}
@@ -127,7 +127,7 @@ TEST(BackgroundJobLifeCycle, Go) {
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Job::_mutex");
bool _hasRun;
Notification<void> _n;
};
diff --git a/src/mongo/util/background_thread_clock_source.h b/src/mongo/util/background_thread_clock_source.h
index 4d106780601..b7c8feed705 100644
--- a/src/mongo/util/background_thread_clock_source.h
+++ b/src/mongo/util/background_thread_clock_source.h
@@ -34,8 +34,8 @@
#include <thread>
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/time_support.h"
@@ -93,7 +93,7 @@ private:
const Milliseconds _granularity;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
stdx::condition_variable _condition;
bool _inShutdown = false;
bool _started = false;
diff --git a/src/mongo/util/clock_source.cpp b/src/mongo/util/clock_source.cpp
index 6e81e0708fa..6f54a1bcc76 100644
--- a/src/mongo/util/clock_source.cpp
+++ b/src/mongo/util/clock_source.cpp
@@ -27,10 +27,10 @@
* it in the license file.
*/
+#include "mongo/util/clock_source.h"
#include "mongo/platform/basic.h"
-
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
-#include "mongo/util/clock_source.h"
#include "mongo/util/waitable.h"
namespace mongo {
@@ -55,7 +55,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
}
struct AlarmInfo {
- stdx::mutex controlMutex;
+ Mutex controlMutex = MONGO_MAKE_LATCH("AlarmInfo::controlMutex");
BasicLockableAdapter* waitLock;
stdx::condition_variable* waitCV;
stdx::cv_status cvWaitResult = stdx::cv_status::no_timeout;
@@ -66,7 +66,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
const auto waiterThreadId = stdx::this_thread::get_id();
bool invokedAlarmInline = false;
invariant(setAlarm(deadline, [alarmInfo, waiterThreadId, &invokedAlarmInline] {
- stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
+ stdx::lock_guard<Latch> controlLk(alarmInfo->controlMutex);
alarmInfo->cvWaitResult = stdx::cv_status::timeout;
if (!alarmInfo->waitLock) {
return;
@@ -86,7 +86,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
Waitable::wait(waitable, this, cv, m);
}
m.unlock();
- stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
+ stdx::lock_guard<Latch> controlLk(alarmInfo->controlMutex);
m.lock();
alarmInfo->waitLock = nullptr;
alarmInfo->waitCV = nullptr;
diff --git a/src/mongo/util/clock_source.h b/src/mongo/util/clock_source.h
index 34b004f2712..c27d22b1dd3 100644
--- a/src/mongo/util/clock_source.h
+++ b/src/mongo/util/clock_source.h
@@ -39,7 +39,7 @@
namespace mongo {
-class Date_t;
+class Waitable;
/**
* An interface for getting the current wall clock time.
diff --git a/src/mongo/util/clock_source_mock.cpp b/src/mongo/util/clock_source_mock.cpp
index d05eb765722..0319e67b481 100644
--- a/src/mongo/util/clock_source_mock.cpp
+++ b/src/mongo/util/clock_source_mock.cpp
@@ -29,6 +29,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source_mock.h"
#include <algorithm>
diff --git a/src/mongo/util/clock_source_mock.h b/src/mongo/util/clock_source_mock.h
index 24c6851a240..689a03832f7 100644
--- a/src/mongo/util/clock_source_mock.h
+++ b/src/mongo/util/clock_source_mock.h
@@ -69,7 +69,7 @@ private:
using Alarm = std::pair<Date_t, unique_function<void()>>;
void _processAlarms(stdx::unique_lock<stdx::mutex> lk);
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
Date_t _now{Date_t::fromMillisSinceEpoch(1)};
std::vector<Alarm> _alarms;
};
diff --git a/src/mongo/util/concurrency/notification.h b/src/mongo/util/concurrency/notification.h
index 44bc7efc9ac..50e6b5e2302 100644
--- a/src/mongo/util/concurrency/notification.h
+++ b/src/mongo/util/concurrency/notification.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -59,7 +59,7 @@ public:
* block).
*/
explicit operator bool() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return !!_value;
}
@@ -68,7 +68,7 @@ public:
* If the wait is interrupted, throws an exception.
*/
T& get(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
opCtx->waitForConditionOrInterrupt(_condVar, lock, [this]() -> bool { return !!_value; });
return _value.get();
}
@@ -78,7 +78,7 @@ public:
* This variant of get cannot be interrupted.
*/
T& get() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_value) {
_condVar.wait(lock);
}
@@ -91,7 +91,7 @@ public:
* call. Must only be called once for the lifetime of the notification.
*/
void set(T value) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_value);
_value = std::move(value);
_condVar.notify_all();
@@ -104,13 +104,13 @@ public:
* If the wait is interrupted, throws an exception.
*/
bool waitFor(OperationContext* opCtx, Milliseconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return opCtx->waitForConditionOrInterruptFor(
_condVar, lock, waitTimeout, [&]() { return !!_value; });
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Notification::_mutex");
stdx::condition_variable _condVar;
// Protected by mutex and only moves from not-set to set once
diff --git a/src/mongo/util/concurrency/spin_lock.h b/src/mongo/util/concurrency/spin_lock.h
index 7f237dc3175..5c5a17b4b74 100644
--- a/src/mongo/util/concurrency/spin_lock.h
+++ b/src/mongo/util/concurrency/spin_lock.h
@@ -37,7 +37,7 @@
#include "mongo/config.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
};
#else
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index fd8d23377ea..ceaf9fcaf7e 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -79,7 +79,7 @@ ThreadPool::Options cleanUpOptions(ThreadPool::Options&& options) {
ThreadPool::ThreadPool(Options options) : _options(cleanUpOptions(std::move(options))) {}
ThreadPool::~ThreadPool() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown_inlock();
if (shutdownComplete != _state) {
_join_inlock(&lk);
@@ -94,7 +94,7 @@ ThreadPool::~ThreadPool() {
}
void ThreadPool::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != preStart) {
severe() << "Attempting to start pool " << _options.poolName
<< ", but it has already started";
@@ -110,7 +110,7 @@ void ThreadPool::startup() {
}
void ThreadPool::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_shutdown_inlock();
}
@@ -130,11 +130,11 @@ void ThreadPool::_shutdown_inlock() {
}
void ThreadPool::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_join_inlock(&lk);
}
-void ThreadPool::_join_inlock(stdx::unique_lock<stdx::mutex>* lk) {
+void ThreadPool::_join_inlock(stdx::unique_lock<Latch>* lk) {
_stateChange.wait(*lk, [this] {
switch (_state) {
case preStart:
@@ -177,7 +177,7 @@ void ThreadPool::_drainPendingTasks() {
<< _options.threadNamePrefix << _nextThreadId++;
setThreadName(threadName);
_options.onCreateThread(threadName);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_pendingTasks.empty()) {
_doOneTask(&lock);
}
@@ -186,7 +186,7 @@ void ThreadPool::_drainPendingTasks() {
}
void ThreadPool::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
switch (_state) {
case joinRequired:
@@ -221,7 +221,7 @@ void ThreadPool::schedule(Task task) {
}
void ThreadPool::waitForIdle() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If there are any pending tasks, or non-idle threads, the pool is not idle.
while (!_pendingTasks.empty() || _numIdleThreads < _threads.size()) {
_poolIsIdle.wait(lk);
@@ -229,7 +229,7 @@ void ThreadPool::waitForIdle() {
}
ThreadPool::Stats ThreadPool::getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats result;
result.options = _options;
result.numThreads = _threads.size();
@@ -257,7 +257,7 @@ void ThreadPool::_workerThreadBody(ThreadPool* pool, const std::string& threadNa
}
void ThreadPool::_consumeTasks() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_state == running) {
if (_pendingTasks.empty()) {
if (_threads.size() > _options.minThreads) {
@@ -331,7 +331,7 @@ void ThreadPool::_consumeTasks() {
fassertFailedNoTrace(28703);
}
-void ThreadPool::_doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept {
+void ThreadPool::_doOneTask(stdx::unique_lock<Latch>* lk) noexcept {
invariant(!_pendingTasks.empty());
LOG(3) << "Executing a task on behalf of pool " << _options.poolName;
Task task = std::move(_pendingTasks.front());
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
index bbae97d1ebe..c382df9544d 100644
--- a/src/mongo/util/concurrency/thread_pool.h
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -34,8 +34,8 @@
#include <string>
#include <vector>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
#include "mongo/util/time_support.h"
@@ -189,7 +189,7 @@ private:
/**
* Implementation of join once _mutex is owned by "lk".
*/
- void _join_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _join_inlock(stdx::unique_lock<Latch>* lk);
/**
* Runs the remaining tasks on a new thread as part of the join process, blocking until
@@ -201,7 +201,7 @@ private:
* Executes one task from _pendingTasks. "lk" must own _mutex, and _pendingTasks must have at
* least one entry.
*/
- void _doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept;
+ void _doOneTask(stdx::unique_lock<Latch>* lk) noexcept;
/**
* Changes the lifecycle state (_state) of the pool and wakes up any threads waiting for a state
@@ -213,7 +213,7 @@ private:
const Options _options;
// Mutex guarding all non-const member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ThreadPool::_mutex");
// This variable represents the lifecycle state of the pool.
//
diff --git a/src/mongo/util/concurrency/thread_pool_test.cpp b/src/mongo/util/concurrency/thread_pool_test.cpp
index 1d85b8b95df..5812a860eab 100644
--- a/src/mongo/util/concurrency/thread_pool_test.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test.cpp
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
#include "mongo/base/init.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/death_test.h"
@@ -70,7 +70,7 @@ protected:
}
void blockingWork() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
++count1;
cv1.notify_all();
while (!flag2) {
@@ -78,7 +78,7 @@ protected:
}
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("ThreadPoolTest::mutex");
stdx::condition_variable cv1;
stdx::condition_variable cv2;
size_t count1 = 0U;
@@ -86,7 +86,7 @@ protected:
private:
void tearDown() override {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
flag2 = true;
cv2.notify_all();
lk.unlock();
@@ -103,7 +103,7 @@ TEST_F(ThreadPoolTest, MinPoolSize0) {
auto& pool = makePool(options);
pool.startup();
ASSERT_EQ(0U, pool.getStats().numThreads);
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool.schedule([this](auto status) {
ASSERT_OK(status);
blockingWork();
@@ -155,7 +155,7 @@ TEST_F(ThreadPoolTest, MaxPoolSize20MinPoolSize15) {
options.maxIdleThreadAge = Milliseconds(100);
auto& pool = makePool(options);
pool.startup();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
for (size_t i = 0U; i < 30U; ++i) {
pool.schedule([this, i](auto status) {
ASSERT_OK(status) << i;
@@ -223,7 +223,7 @@ DEATH_TEST(ThreadPoolTest,
// mutex-lock is blocked waiting for the mutex, so the independent thread must be blocked inside
// of join(), until the pool thread finishes. At this point, if we destroy the pool, its
// destructor should trigger a fatal error due to double-join.
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
ThreadPool::Options options;
options.minThreads = 2;
options.poolName = "DoubleJoinPool";
@@ -233,10 +233,10 @@ DEATH_TEST(ThreadPoolTest,
while (pool->getStats().numThreads < 2U) {
sleepmillis(50);
}
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool->schedule([&mutex](auto status) {
ASSERT_OK(status);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
});
stdx::thread t([&pool] {
pool->shutdown();
diff --git a/src/mongo/util/concurrency/thread_pool_test_common.cpp b/src/mongo/util/concurrency/thread_pool_test_common.cpp
index 5f32e649c65..33ff3cb303b 100644
--- a/src/mongo/util/concurrency/thread_pool_test_common.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test_common.cpp
@@ -35,8 +35,8 @@
#include <memory>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
@@ -203,10 +203,10 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
auto& pool = getThreadPool();
std::function<void()> func;
std::size_t n = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condvar;
func = [&pool, &n, &func, &condvar, &mutex, depth]() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
if (n < depth) {
n++;
lk.unlock();
@@ -223,7 +223,7 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
pool.startup();
pool.join();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
condvar.wait(lk, [&n, depth] { return n == depth; });
}
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index e30746807ae..a6abd154b2e 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -128,7 +128,7 @@ void TicketHolder::release() {
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_resizeMutex);
+ stdx::lock_guard<Latch> lk(_resizeMutex);
if (newSize < 5)
return Status(ErrorCodes::BadValue,
@@ -174,12 +174,12 @@ TicketHolder::TicketHolder(int num) : _outof(num), _num(num) {}
TicketHolder::~TicketHolder() = default;
bool TicketHolder::tryAcquire() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _tryAcquire();
}
void TicketHolder::waitForTicket(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
opCtx->waitForConditionOrInterrupt(_newTicket, lk, [this] { return _tryAcquire(); });
@@ -189,7 +189,7 @@ void TicketHolder::waitForTicket(OperationContext* opCtx) {
}
bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
return opCtx->waitForConditionOrInterruptUntil(
@@ -202,14 +202,14 @@ bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
void TicketHolder::release() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_num++;
}
_newTicket.notify_one();
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int used = _outof.load() - _num;
if (used > newSize) {
diff --git a/src/mongo/util/concurrency/ticketholder.h b/src/mongo/util/concurrency/ticketholder.h
index 8ab3d4a39d9..d67e6bd04d1 100644
--- a/src/mongo/util/concurrency/ticketholder.h
+++ b/src/mongo/util/concurrency/ticketholder.h
@@ -33,8 +33,8 @@
#endif
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/time_support.h"
@@ -87,13 +87,13 @@ private:
// You can read _outof without a lock, but have to hold _resizeMutex to change.
AtomicWord<int> _outof;
- stdx::mutex _resizeMutex;
+ Mutex _resizeMutex = MONGO_MAKE_LATCH("TicketHolder::_resizeMutex");
#else
bool _tryAcquire();
AtomicWord<int> _outof;
int _num;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TicketHolder::_mutex");
stdx::condition_variable _newTicket;
#endif
};
diff --git a/src/mongo/util/concurrency/with_lock.h b/src/mongo/util/concurrency/with_lock.h
index d5c55a16cb3..9d7f24bed8e 100644
--- a/src/mongo/util/concurrency/with_lock.h
+++ b/src/mongo/util/concurrency/with_lock.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include <utility>
@@ -56,7 +56,7 @@ namespace mongo {
*
* A call to such a function looks like this:
*
- * stdx::lock_guard<stdx::mutex> lk(_mutex);
+ * stdx::lock_guard<Latch> lk(_mutex);
* _clobber(lk, opCtx); // instead of _clobber_inlock(opCtx)
*
* Note that the formal argument need not (and should not) be named unless it is needed to pass
@@ -68,11 +68,11 @@ namespace mongo {
*
*/
struct WithLock {
- template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex> const&) noexcept {}
+ template <typename LatchT>
+ WithLock(stdx::lock_guard<LatchT> const&) noexcept {}
- template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex> const& lock) noexcept {
+ template <typename LatchT>
+ WithLock(stdx::unique_lock<LatchT> const& lock) noexcept {
invariant(lock.owns_lock());
}
@@ -88,9 +88,9 @@ struct WithLock {
// No moving a lock_guard<> or unique_lock<> in.
template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex>&&) = delete;
+ WithLock(stdx::lock_guard<Latch>&&) = delete;
template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex>&&) = delete;
+ WithLock(stdx::unique_lock<Latch>&&) = delete;
/*
* Produces a WithLock without benefit of any actual lock, for use in cases where a lock is not
diff --git a/src/mongo/util/concurrency/with_lock_test.cpp b/src/mongo/util/concurrency/with_lock_test.cpp
index 0bfe2b3829e..5724f899471 100644
--- a/src/mongo/util/concurrency/with_lock_test.cpp
+++ b/src/mongo/util/concurrency/with_lock_test.cpp
@@ -31,7 +31,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
@@ -46,15 +46,15 @@ struct Beerp {
explicit Beerp(int i) {
_blerp(WithLock::withoutLock(), i);
}
- Beerp(stdx::lock_guard<stdx::mutex> const& lk, int i) {
+ Beerp(stdx::lock_guard<Latch> const& lk, int i) {
_blerp(lk, i);
}
int bleep(char n) {
- stdx::lock_guard<stdx::mutex> lk(_m);
+ stdx::lock_guard<Latch> lk(_m);
return _bloop(lk, n - '0');
}
int bleep(int i) {
- stdx::unique_lock<stdx::mutex> lk(_m);
+ stdx::unique_lock<Latch> lk(_m);
return _bloop(lk, i);
}
@@ -66,7 +66,7 @@ private:
log() << i << " bleep" << (i == 1 ? "\n" : "s\n");
return i;
}
- stdx::mutex _m;
+ Mutex _m = MONGO_MAKE_LATCH("Beerp::_m");
};
TEST(WithLockTest, OverloadSet) {
@@ -74,8 +74,8 @@ TEST(WithLockTest, OverloadSet) {
ASSERT_EQ(1, b.bleep('1'));
ASSERT_EQ(2, b.bleep(2));
- stdx::mutex m;
- stdx::lock_guard<stdx::mutex> lk(m);
+ auto m = MONGO_MAKE_LATCH();
+ stdx::lock_guard<Latch> lk(m);
Beerp(lk, 3);
}
diff --git a/src/mongo/util/diagnostic_info.cpp b/src/mongo/util/diagnostic_info.cpp
index 9681414e797..ea6cbe376df 100644
--- a/src/mongo/util/diagnostic_info.cpp
+++ b/src/mongo/util/diagnostic_info.cpp
@@ -33,12 +33,17 @@
#include "mongo/util/diagnostic_info.h"
+#include "mongo/config.h"
+
#if defined(__linux__)
#include <elf.h>
-#include <execinfo.h>
#include <link.h>
#endif
+#if defined(MONGO_CONFIG_HAVE_EXECINFO_BACKTRACE)
+#include <execinfo.h>
+#endif
+
#include <fmt/format.h>
#include <fmt/ostream.h>
@@ -52,8 +57,6 @@
using namespace fmt::literals;
namespace mongo {
-// Maximum number of stack frames to appear in a backtrace.
-const unsigned int kMaxBackTraceFrames = 100;
namespace {
MONGO_FAIL_POINT_DEFINE(currentOpSpawnsThreadWaitingForLatch);
@@ -67,10 +70,10 @@ public:
void setIsContended(bool value);
private:
- Mutex _testMutex{kBlockedOpMutexName};
+ Mutex _testMutex = MONGO_MAKE_LATCH(kBlockedOpMutexName);
stdx::condition_variable _cv;
- stdx::mutex _m;
+ stdx::mutex _m; // NOLINT
struct State {
bool isContended = false;
@@ -127,34 +130,45 @@ void BlockedOp::setIsContended(bool value) {
_cv.notify_one();
}
-const auto gDiagnosticHandle = Client::declareDecoration<DiagnosticInfo::Diagnostic>();
+struct DiagnosticInfoHandle {
+ stdx::mutex mutex; // NOLINT
+ boost::optional<DiagnosticInfo> maybeInfo = boost::none;
+};
+const auto getDiagnosticInfoHandle = Client::declareDecoration<DiagnosticInfoHandle>();
MONGO_INITIALIZER(LockActions)(InitializerContext* context) {
-
- class LockActionsSubclass : public LockActions {
+ class LockActionsSubclass : public Mutex::LockActions {
void onContendedLock(const StringData& name) override {
- if (haveClient()) {
- DiagnosticInfo::Diagnostic::set(
- Client::getCurrent(),
- std::make_shared<DiagnosticInfo>(takeDiagnosticInfo(name)));
- }
-
- if (currentOpSpawnsThreadWaitingForLatch.shouldFail() &&
- (name == kBlockedOpMutexName)) {
- gBlockedOp.setIsContended(true);
+ auto client = Client::getCurrent();
+ if (client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ handle.maybeInfo.emplace(DiagnosticInfo::capture(name));
+
+ if (currentOpSpawnsThreadWaitingForLatch.shouldFail() &&
+ (name == kBlockedOpMutexName)) {
+ gBlockedOp.setIsContended(true);
+ }
}
}
- void onUnlock(const StringData&) override {
- DiagnosticInfo::Diagnostic::clearDiagnostic();
+ void onUnlock(const StringData& name) override {
+ auto client = Client::getCurrent();
+ if (client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ handle.maybeInfo.reset();
+ }
}
};
- std::unique_ptr<LockActions> mutexPointer = std::make_unique<LockActionsSubclass>();
- Mutex::setLockActions(std::move(mutexPointer));
+    // Intentionally leaked: Latches may be used from detached threads that can outlive static destruction.
+ static auto& actions = *new LockActionsSubclass;
+ Mutex::LockActions::add(&actions);
return Status::OK();
}
+/*
MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
class ConditionVariableActionsSubclass : public ConditionVariableActions {
@@ -162,7 +176,7 @@ MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
if (haveClient()) {
DiagnosticInfo::Diagnostic::set(
Client::getCurrent(),
- std::make_shared<DiagnosticInfo>(takeDiagnosticInfo(name)));
+ std::make_shared<DiagnosticInfo>(capture(name)));
}
}
void onFulfilledConditionVariable() override {
@@ -176,28 +190,10 @@ MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
return Status::OK();
}
+*/
} // namespace
-auto DiagnosticInfo::Diagnostic::get(Client* const client) -> std::shared_ptr<DiagnosticInfo> {
- auto& handle = gDiagnosticHandle(client);
- stdx::lock_guard lk(handle.m);
- return handle.diagnostic;
-}
-
-void DiagnosticInfo::Diagnostic::set(Client* const client,
- std::shared_ptr<DiagnosticInfo> newDiagnostic) {
- auto& handle = gDiagnosticHandle(client);
- stdx::lock_guard lk(handle.m);
- handle.diagnostic = newDiagnostic;
-}
-
-void DiagnosticInfo::Diagnostic::clearDiagnostic() {
- if (haveClient()) {
- DiagnosticInfo::Diagnostic::set(Client::getCurrent(), nullptr);
- }
-}
-
#if defined(__linux__)
namespace {
@@ -232,8 +228,6 @@ MONGO_INITIALIZER(InitializeDynamicObjectMap)(InitializerContext* context) {
return Status::OK();
};
-} // anonymous namespace
-
int DynamicObjectMap::addToMap(dl_phdr_info* info, size_t size, void* data) {
auto& addr_map = *reinterpret_cast<decltype(DynamicObjectMap::_map)*>(data);
for (int j = 0; j < info->dlpi_phnum; j++) {
@@ -271,30 +265,33 @@ DiagnosticInfo::StackFrame DynamicObjectMap::getFrame(void* instructionPtr) cons
return DiagnosticInfo::StackFrame{frame.objectPath, fileOffset};
}
+} // namespace
+#endif // linux
+
+#if defined(MONGO_CONFIG_HAVE_EXECINFO_BACKTRACE) && defined(__linux__)
// iterates through the backtrace instruction pointers to
// find the instruction pointer that refers to a segment in the addr_map
DiagnosticInfo::StackTrace DiagnosticInfo::makeStackTrace() const {
DiagnosticInfo::StackTrace trace;
- for (auto addr : _backtraceAddresses) {
- trace.frames.emplace_back(gDynamicObjectMap.getFrame(addr));
+ for (auto address : _backtrace.data) {
+ trace.frames.emplace_back(gDynamicObjectMap.getFrame(address));
}
return trace;
}
-static std::vector<void*> getBacktraceAddresses() {
- std::vector<void*> backtraceAddresses(kMaxBackTraceFrames, 0);
- int addressCount = backtrace(backtraceAddresses.data(), kMaxBackTraceFrames);
- // backtrace will modify the vector's underlying array without updating its size
- backtraceAddresses.resize(static_cast<unsigned int>(addressCount));
- return backtraceAddresses;
+auto DiagnosticInfo::getBacktrace() -> Backtrace {
+ Backtrace list;
+ auto len = ::backtrace(list.data.data(), list.data.size());
+ list.data.resize(len);
+ return list;
}
#else
DiagnosticInfo::StackTrace DiagnosticInfo::makeStackTrace() const {
return DiagnosticInfo::StackTrace();
}
-static std::vector<void*> getBacktraceAddresses() {
- return std::vector<void*>();
+auto DiagnosticInfo::getBacktrace() -> Backtrace {
+    return Backtrace{{}};
}
#endif
@@ -311,7 +308,7 @@ bool operator==(const DiagnosticInfo::StackTrace& trace1,
bool operator==(const DiagnosticInfo& info1, const DiagnosticInfo& info2) {
return info1._captureName == info2._captureName && info1._timestamp == info2._timestamp &&
- info1._backtraceAddresses == info2._backtraceAddresses;
+ info1._backtrace.data == info2._backtrace.data;
}
std::string DiagnosticInfo::StackFrame::toString() const {
@@ -335,15 +332,16 @@ std::string DiagnosticInfo::StackTrace::toString() const {
std::string DiagnosticInfo::toString() const {
return "{{ \"name\": \"{}\", \"time\": \"{}\", \"backtraceSize\": {} }}"_format(
- _captureName.toString(), _timestamp.toString(), _backtraceAddresses.size());
+ _captureName.toString(), _timestamp.toString(), _backtrace.data.size());
}
-DiagnosticInfo takeDiagnosticInfo(const StringData& captureName) {
+DiagnosticInfo DiagnosticInfo::capture(const StringData& captureName, Options options) {
// uses backtrace to retrieve an array of instruction pointers for currently active
// function calls of the program
return DiagnosticInfo(getGlobalServiceContext()->getFastClockSource()->now(),
captureName,
- getBacktraceAddresses());
+ options.shouldTakeBacktrace ? DiagnosticInfo::getBacktrace()
+ : Backtrace{{}});
}
DiagnosticInfo::BlockedOpGuard::~BlockedOpGuard() {
@@ -365,4 +363,10 @@ auto DiagnosticInfo::maybeMakeBlockedOpForTest(Client* client) -> std::unique_pt
return guard;
}
+boost::optional<DiagnosticInfo> DiagnosticInfo::get(Client& client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ return handle.maybeInfo;
+}
+
} // namespace mongo
diff --git a/src/mongo/util/diagnostic_info.h b/src/mongo/util/diagnostic_info.h
index a2c3339d3a5..1e578cecd80 100644
--- a/src/mongo/util/diagnostic_info.h
+++ b/src/mongo/util/diagnostic_info.h
@@ -53,19 +53,15 @@ public:
~BlockedOpGuard();
};
- struct Diagnostic {
- static std::shared_ptr<DiagnosticInfo> get(Client*);
- static void set(Client*, std::shared_ptr<DiagnosticInfo>);
- static void clearDiagnostic();
- stdx::mutex m;
- std::shared_ptr<DiagnosticInfo> diagnostic;
- };
+ static boost::optional<DiagnosticInfo> get(Client& client);
virtual ~DiagnosticInfo() = default;
- DiagnosticInfo(const DiagnosticInfo&) = delete;
- DiagnosticInfo& operator=(const DiagnosticInfo&) = delete;
- DiagnosticInfo(DiagnosticInfo&&) = default;
- DiagnosticInfo& operator=(DiagnosticInfo&&) = default;
+
+ // Maximum number of stack frames to appear in a backtrace.
+ static constexpr size_t kMaxBackTraceFrames = 100ull;
+ struct Backtrace {
+ std::vector<void*> data = std::vector<void*>(kMaxBackTraceFrames, nullptr);
+ };
struct StackFrame {
std::string toString() const;
@@ -97,10 +93,23 @@ public:
StackTrace makeStackTrace() const;
- static std::vector<void*> getBacktraceAddresses();
+ static Backtrace getBacktrace();
std::string toString() const;
- friend DiagnosticInfo takeDiagnosticInfo(const StringData& captureName);
+
+ /**
+     * Simple options struct to go with DiagnosticInfo::capture
+ */
+ struct Options {
+ Options() : shouldTakeBacktrace{false} {}
+
+ bool shouldTakeBacktrace;
+ };
+
+ /**
+ * Captures the diagnostic information based on the caller's context.
+ */
+ static DiagnosticInfo capture(const StringData& captureName, Options options = Options{});
/**
* This function checks the FailPoint currentOpSpawnsThreadWaitingForLatch and potentially
@@ -117,14 +126,10 @@ private:
Date_t _timestamp;
StringData _captureName;
- std::vector<void*> _backtraceAddresses;
-
- DiagnosticInfo(const Date_t& timestamp,
- const StringData& captureName,
- std::vector<void*> backtraceAddresses)
- : _timestamp(timestamp),
- _captureName(captureName),
- _backtraceAddresses(backtraceAddresses) {}
+ Backtrace _backtrace;
+
+ DiagnosticInfo(const Date_t& timestamp, const StringData& captureName, Backtrace backtrace)
+ : _timestamp(timestamp), _captureName(captureName), _backtrace(std::move(backtrace)) {}
};
@@ -136,9 +141,4 @@ inline std::ostream& operator<<(std::ostream& s, const DiagnosticInfo& info) {
return s << info.toString();
}
-/**
- * Captures the diagnostic information based on the caller's context.
- */
-DiagnosticInfo takeDiagnosticInfo(const StringData& captureName);
-
} // namespace mongo
diff --git a/src/mongo/util/diagnostic_info_test.cpp b/src/mongo/util/diagnostic_info_test.cpp
index 853962f515e..dc8ebdda058 100644
--- a/src/mongo/util/diagnostic_info_test.cpp
+++ b/src/mongo/util/diagnostic_info_test.cpp
@@ -33,6 +33,8 @@
#include <string>
+#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/compiler.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/log.h"
@@ -47,7 +49,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
setGlobalServiceContext(std::move(serviceContext));
// take the initial diagnostic info
- DiagnosticInfo capture1 = takeDiagnosticInfo("capture1"_sd);
+ DiagnosticInfo capture1 = DiagnosticInfo::capture("capture1"_sd);
ASSERT_EQ(capture1.getCaptureName(), "capture1");
// mock time advancing and check that the current time is greater than capture1's timestamp
@@ -55,7 +57,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
ASSERT_LT(capture1.getTimestamp(), clockSourcePointer->now());
// take a second diagnostic capture and compare its fields to the first
- DiagnosticInfo capture2 = takeDiagnosticInfo("capture2"_sd);
+ DiagnosticInfo capture2 = DiagnosticInfo::capture("capture2"_sd);
ASSERT_LT(capture1.getTimestamp(), capture2.getTimestamp());
ASSERT_EQ(capture2.getCaptureName(), "capture2");
ASSERT_NE(capture2, capture1);
@@ -65,7 +67,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
}
using MaybeDiagnosticInfo = boost::optional<DiagnosticInfo>;
-void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, size_t i);
+void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, AtomicWord<int>& i);
TEST(DiagnosticInfo, StackTraceTest) {
// set up serviceContext and clock source
@@ -75,7 +77,11 @@ TEST(DiagnosticInfo, StackTraceTest) {
setGlobalServiceContext(std::move(serviceContext));
MaybeDiagnosticInfo infoRecurse0;
- recurseAndCaptureInfo(infoRecurse0, 0);
+ {
+ AtomicWord<int> i{0};
+ recurseAndCaptureInfo(infoRecurse0, i);
+ }
+
ASSERT(infoRecurse0);
log() << *infoRecurse0;
auto trace0 = infoRecurse0->makeStackTrace();
@@ -120,34 +126,33 @@ TEST(DiagnosticInfo, StackTraceTest) {
};
{
- volatile size_t i = 3; // NOLINT
+ constexpr auto k = 3;
+ AtomicWord<int> i{k};
MaybeDiagnosticInfo infoRecurse;
recurseAndCaptureInfo(infoRecurse, i);
- testRecursion(i, infoRecurse);
+ testRecursion(k, infoRecurse);
}
{
- volatile size_t i = 10; // NOLINT
+ constexpr auto k = 10;
+ AtomicWord<int> i{k};
MaybeDiagnosticInfo infoRecurse;
recurseAndCaptureInfo(infoRecurse, i);
- testRecursion(i, infoRecurse);
+ testRecursion(k, infoRecurse);
}
#else
ASSERT_TRUE(trace0.frames.empty());
#endif
}
-MONGO_COMPILER_NOINLINE void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, size_t i) {
- // Prevent tail-call optimization.
-#ifndef _WIN32
- asm volatile(""); // NOLINT
-#endif
-
- if (i == 0) {
- info = takeDiagnosticInfo("Recursion!"_sd);
+MONGO_COMPILER_NOINLINE void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, AtomicWord<int>& i) {
+ if (i.fetchAndSubtract(1) == 0) {
+ DiagnosticInfo::Options options;
+ options.shouldTakeBacktrace = true;
+ info = DiagnosticInfo::capture("Recursion!"_sd, std::move(options));
return;
}
- recurseAndCaptureInfo(info, --i);
+ recurseAndCaptureInfo(info, i);
}
} // namespace mongo
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index b92b59253ea..49b741c4493 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -37,8 +37,8 @@
#include <functional>
#include <stack>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/log.h"
#include "mongo/util/quick_exit.h"
@@ -47,7 +47,7 @@ namespace mongo {
namespace {
-stdx::mutex shutdownMutex;
+Mutex shutdownMutex = MONGO_MAKE_LATCH("shutdownMutex");
stdx::condition_variable shutdownTasksComplete;
boost::optional<ExitCode> shutdownExitCode;
bool shutdownTasksInProgress = false;
@@ -83,7 +83,7 @@ bool globalInShutdownDeprecated() {
}
ExitCode waitForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(shutdownMutex);
+ stdx::unique_lock<Latch> lk(shutdownMutex);
shutdownTasksComplete.wait(lk, [] {
const auto shutdownStarted = static_cast<bool>(shutdownExitCode);
return shutdownStarted && !shutdownTasksInProgress;
@@ -93,7 +93,7 @@ ExitCode waitForShutdown() {
}
void registerShutdownTask(unique_function<void(const ShutdownTaskArgs&)> task) {
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
invariant(!globalInShutdownDeprecated());
shutdownTasks.emplace(std::move(task));
}
@@ -102,7 +102,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::unique_lock<stdx::mutex> lock(shutdownMutex);
+ stdx::unique_lock<Latch> lock(shutdownMutex);
if (shutdownTasksInProgress) {
// Someone better have called shutdown in some form already.
@@ -138,7 +138,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownTasksComplete.notify_all();
@@ -151,7 +151,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
if (globalInShutdownDeprecated())
return;
@@ -166,7 +166,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownExitCode.emplace(EXIT_CLEAN);
}
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index c40a4b829d6..389a809ef23 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -93,7 +93,7 @@ void FailPoint::setMode(Mode mode, ValType val, BSONObj extra) {
* 3. Sets the new mode.
*/
- stdx::lock_guard<stdx::mutex> scoped(_modMutex);
+ stdx::lock_guard<Latch> scoped(_modMutex);
// Step 1
disable();
@@ -259,7 +259,7 @@ StatusWith<FailPoint::ModeOptions> FailPoint::parseBSON(const BSONObj& obj) {
BSONObj FailPoint::toBSON() const {
BSONObjBuilder builder;
- stdx::lock_guard<stdx::mutex> scoped(_modMutex);
+ stdx::lock_guard<Latch> scoped(_modMutex);
builder.append("mode", _mode);
builder.append("data", _data);
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index 57ee76bca9d..daf39bcba49 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -35,7 +35,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -314,7 +314,7 @@ private:
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable stdx::mutex _modMutex;
+ mutable Mutex _modMutex = MONGO_MAKE_LATCH("FailPoint::_modMutex");
};
} // namespace mongo
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 1880b0d18a1..a99d7132a40 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -36,6 +36,7 @@
#include <string>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/fail_point.h"
@@ -46,6 +47,7 @@
using mongo::BSONObj;
using mongo::FailPoint;
using mongo::FailPointEnableBlock;
+
namespace stdx = mongo::stdx;
namespace mongo_test {
@@ -157,7 +159,7 @@ public:
void stopTest() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
_inShutdown = true;
}
for (auto& t : _tasks) {
@@ -179,7 +181,7 @@ private:
}
});
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -200,7 +202,7 @@ private:
} catch (const std::logic_error&) {
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -209,7 +211,7 @@ private:
void simpleTask() {
while (true) {
static_cast<void>(MONGO_unlikely(_fp.shouldFail()));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -223,7 +225,7 @@ private:
_fp.setMode(FailPoint::alwaysOn, 0, BSON("a" << 44));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -231,7 +233,8 @@ private:
FailPoint _fp;
std::vector<stdx::thread> _tasks;
- stdx::mutex _mutex;
+
+ mongo::Mutex _mutex = MONGO_MAKE_LATCH();
bool _inShutdown = false;
};
diff --git a/src/mongo/util/future_impl.h b/src/mongo/util/future_impl.h
index 64aa57a22d8..db83f1c0cbc 100644
--- a/src/mongo/util/future_impl.h
+++ b/src/mongo/util/future_impl.h
@@ -38,8 +38,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/type_traits.h"
#include "mongo/stdx/utility.h"
#include "mongo/util/assert_util.h"
@@ -362,7 +362,7 @@ public:
if (state.load(std::memory_order_acquire) == SSBState::kFinished)
return;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
if (!cv) {
cv.emplace();
@@ -430,7 +430,7 @@ public:
Children localChildren;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
localChildren.swap(children);
if (cv) {
// This must be done inside the lock to correctly synchronize with wait().
@@ -483,8 +483,8 @@ public:
// These are only used to signal completion to blocking waiters. Benchmarks showed that it was
// worth deferring the construction of cv, so it can be avoided when it isn't necessary.
- stdx::mutex mx; // F (not that it matters)
- boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
+ Mutex mx = MONGO_MAKE_LATCH("FutureResolution"); // F
+ boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
// This holds the children created from a SharedSemiFuture. When this SharedState is completed,
// the result will be copied in to each of the children. This allows their continuations to have
diff --git a/src/mongo/util/heap_profiler.cpp b/src/mongo/util/heap_profiler.cpp
index 05a8a59f03a..2784eac8cfc 100644
--- a/src/mongo/util/heap_profiler.cpp
+++ b/src/mongo/util/heap_profiler.cpp
@@ -282,8 +282,10 @@ private:
// >1: sample ever sampleIntervalBytes bytes allocated - less accurate but fast and small
std::atomic_size_t sampleIntervalBytes; // NOLINT
- stdx::mutex hashtable_mutex; // guards updates to both object and stack hash tables
- stdx::mutex stackinfo_mutex; // guards against races updating the StackInfo bson representation
+ // guards updates to both object and stack hash tables
+ stdx::mutex hashtable_mutex; // NOLINT
+ // guards against races updating the StackInfo bson representation
+ stdx::mutex stackinfo_mutex; // NOLINT
// cumulative bytes allocated - determines when samples are taken
std::atomic_size_t bytesAllocated{0}; // NOLINT
diff --git a/src/mongo/util/interruptible.h b/src/mongo/util/interruptible.h
index 446e61849cc..6e182d6bbd7 100644
--- a/src/mongo/util/interruptible.h
+++ b/src/mongo/util/interruptible.h
@@ -331,9 +331,9 @@ public:
* Sleeps until "deadline"; throws an exception if the interruptible is interrupted before then.
*/
void sleepUntil(Date_t deadline) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptUntil(cv, lk, deadline, [] { return false; }));
}
@@ -342,9 +342,9 @@ public:
* then.
*/
void sleepFor(Milliseconds duration) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptFor(cv, lk, duration, [] { return false; }));
}
diff --git a/src/mongo/util/invalidating_lru_cache.h b/src/mongo/util/invalidating_lru_cache.h
index 7f899151b48..6cef29f63c8 100644
--- a/src/mongo/util/invalidating_lru_cache.h
+++ b/src/mongo/util/invalidating_lru_cache.h
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -140,7 +140,7 @@ public:
* cache.
*/
boost::optional<std::shared_ptr<Value>> get(const Key& key) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto myGeneration = _generation;
auto cacheIt = _cache.find(key);
@@ -192,7 +192,7 @@ public:
* Returns a vector of info about items in the cache for testing/reporting purposes
*/
std::vector<CachedItemInfo> getCacheInfo() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<CachedItemInfo> ret;
ret.reserve(_active.size() + _cache.size());
@@ -255,7 +255,7 @@ private:
private:
InvalidatingLRUCache<Key, Value, Invalidator>* _cache;
- stdx::unique_lock<stdx::mutex> _lk;
+ stdx::unique_lock<Latch> _lk;
std::vector<std::shared_ptr<Value>> _activePtrsToDestroy;
};
@@ -331,7 +331,7 @@ private:
auto _makeDeleterWithLock(const Key& key, uint64_t myGeneration) -> auto {
return [this, key, myGeneration](Value* d) {
std::unique_ptr<Value> owned(d);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto it = _active.find(key);
if (it != _active.end() && it->second.expired()) {
_active.erase(it);
@@ -345,7 +345,7 @@ private:
};
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InvalidatingLRUCache::_mutex");
// The generation count - items will not be returned to the cache if their generation count
// does not match the current generation count
diff --git a/src/mongo/util/lockable_adapter_test.cpp b/src/mongo/util/lockable_adapter_test.cpp
index f5325635c74..f0431d19c0e 100644
--- a/src/mongo/util/lockable_adapter_test.cpp
+++ b/src/mongo/util/lockable_adapter_test.cpp
@@ -68,7 +68,7 @@ public:
int unlockCalls{0};
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
};
} // namespace
@@ -76,7 +76,7 @@ private:
TEST(BasicLockableAdapter, TestWithConditionVariable) {
bool ready = false;
stdx::condition_variable_any cv;
- stdx::mutex mut;
+ stdx::mutex mut; // NOLINT
auto result = stdx::async(stdx::launch::async, [&ready, &mut, &cv] {
stdx::lock_guard lock(mut);
@@ -93,7 +93,7 @@ TEST(BasicLockableAdapter, TestWithConditionVariable) {
TEST(BasicLockableAdapter, TestWithMutexTypes) {
{
- stdx::mutex mut;
+ stdx::mutex mut; // NOLINT
callUnderLock(mut);
}
diff --git a/src/mongo/util/net/http_client_curl.cpp b/src/mongo/util/net/http_client_curl.cpp
index fc5307bcb45..ca51d73111f 100644
--- a/src/mongo/util/net/http_client_curl.cpp
+++ b/src/mongo/util/net/http_client_curl.cpp
@@ -44,7 +44,7 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/net/http_client.h"
@@ -131,17 +131,17 @@ private:
}
static void _lockShare(CURL*, curl_lock_data, curl_lock_access, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->lock();
+ reinterpret_cast<Mutex*>(ctx)->lock();
}
static void _unlockShare(CURL*, curl_lock_data, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->unlock();
+ reinterpret_cast<Mutex*>(ctx)->unlock();
}
private:
bool _initialized = false;
CURLSH* _share = nullptr;
- stdx::mutex _shareMutex;
+ Mutex _shareMutex = MONGO_MAKE_LATCH("CurlLibraryManager::_shareMutex");
} curlLibraryManager;
/**
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index f3933dc640f..864e55d40f1 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -346,7 +346,7 @@ private:
class ThreadIDManager {
public:
unsigned long reserveID() {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
if (!_idLast.empty()) {
unsigned long ret = _idLast.top();
_idLast.pop();
@@ -356,13 +356,14 @@ private:
}
void releaseID(unsigned long id) {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
_idLast.push(id);
}
private:
// Machinery for producing IDs that are unique for the life of a thread.
- stdx::mutex _idMutex; // Protects _idNext and _idLast.
+ Mutex _idMutex =
+ MONGO_MAKE_LATCH("ThreadIDManager::_idMutex"); // Protects _idNext and _idLast.
unsigned long _idNext = 0; // Stores the next thread ID to use, if none already allocated.
std::stack<unsigned long, std::vector<unsigned long>>
_idLast; // Stores old thread IDs, for reuse.
@@ -476,7 +477,7 @@ private:
/** Either returns a cached password, or prompts the user to enter one. */
StatusWith<StringData> fetchPassword() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_password->size()) {
return StringData(_password->c_str());
}
@@ -501,7 +502,7 @@ private:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PasswordFetcher::_mutex");
SecureString _password; // Protected by _mutex
std::string _prompt;
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 65b61b08ab5..7c2b223633e 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -3473,7 +3473,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3482,7 +3481,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
environment = moe::Environment();
@@ -3492,7 +3490,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
@@ -3517,7 +3514,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3527,7 +3523,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
@@ -3538,7 +3533,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
diff --git a/src/mongo/util/periodic_runner.h b/src/mongo/util/periodic_runner.h
index e9dcfa67489..210bd3c4ecf 100644
--- a/src/mongo/util/periodic_runner.h
+++ b/src/mongo/util/periodic_runner.h
@@ -35,7 +35,7 @@
#include <boost/optional.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/util/periodic_runner_impl.cpp b/src/mongo/util/periodic_runner_impl.cpp
index 98a517cf7d9..dc9f091505d 100644
--- a/src/mongo/util/periodic_runner_impl.cpp
+++ b/src/mongo/util/periodic_runner_impl.cpp
@@ -77,7 +77,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
}
startPromise.emplaceValue();
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_execStatus != ExecutionStatus::CANCELED) {
// Wait until it's unpaused or canceled
_condvar.wait(lk, [&] { return _execStatus != ExecutionStatus::PAUSED; });
@@ -120,14 +120,14 @@ void PeriodicRunnerImpl::PeriodicJobImpl::start() {
}
void PeriodicRunnerImpl::PeriodicJobImpl::pause() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING);
_execStatus = PeriodicJobImpl::ExecutionStatus::PAUSED;
}
void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::PAUSED);
_execStatus = PeriodicJobImpl::ExecutionStatus::RUNNING;
}
@@ -136,7 +136,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
auto lastExecStatus = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return std::exchange(_execStatus, ExecutionStatus::CANCELED);
}();
@@ -158,12 +158,12 @@ void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
}
Milliseconds PeriodicRunnerImpl::PeriodicJobImpl::getPeriod() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _job.interval;
}
void PeriodicRunnerImpl::PeriodicJobImpl::setPeriod(Milliseconds ms) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_job.interval = ms;
if (_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING) {
diff --git a/src/mongo/util/periodic_runner_impl.h b/src/mongo/util/periodic_runner_impl.h
index a921a66c59f..07ed7db0ebd 100644
--- a/src/mongo/util/periodic_runner_impl.h
+++ b/src/mongo/util/periodic_runner_impl.h
@@ -32,8 +32,8 @@
#include <memory>
#include <vector>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/future.h"
@@ -82,7 +82,7 @@ private:
stdx::thread _thread;
SharedPromise<void> _stopPromise;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex");
stdx::condition_variable _condvar;
/**
* The current execution status of the job.
diff --git a/src/mongo/util/periodic_runner_impl_test.cpp b/src/mongo/util/periodic_runner_impl_test.cpp
index 86f2f1a96d6..21018ea09b7 100644
--- a/src/mongo/util/periodic_runner_impl_test.cpp
+++ b/src/mongo/util/periodic_runner_impl_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/periodic_runner_impl.h"
#include "mongo/db/service_context_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source_mock.h"
namespace mongo {
@@ -75,14 +75,14 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -96,7 +96,7 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
for (int i = 0; i < 10; i++) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&count, &i] { return count > i; });
}
}
@@ -108,14 +108,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobDoesNotRunWithoutStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -133,14 +133,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -152,7 +152,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
// Fast forward ten times, we should run all ten times.
for (int i = 0; i < 10; i++) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == i + 1; });
}
clockSource().advance(interval);
@@ -166,14 +166,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
bool isPaused = false;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// This will fail if pause does not work correctly.
ASSERT_FALSE(isPaused);
hasExecuted = true;
@@ -186,12 +186,12 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return hasExecuted; });
}
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
isPaused = true;
jobAnchor.pause();
}
@@ -211,13 +211,13 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -228,7 +228,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == 1; });
}
@@ -242,7 +242,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Wait for count to increment due to job execution.
cv.wait(lk, [&] { return count == i + 1; });
}
@@ -264,7 +264,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
// Wait for count to increase. Test will hang if resume() does not work correctly.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count > numIterationsBeforePause; });
}
@@ -277,14 +277,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
Milliseconds intervalA{5};
Milliseconds intervalB{10};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add two jobs, ensure they both run the proper number of times
PeriodicRunner::PeriodicJob jobA("job",
[&countA, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countA++;
}
cv.notify_all();
@@ -294,7 +294,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
PeriodicRunner::PeriodicJob jobB("job",
[&countB, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countB++;
}
cv.notify_all();
@@ -311,7 +311,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
for (int i = 0; i <= 10; i++) {
clockSource().advance(intervalA);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&countA, &countB, &i] { return (countA > i && countB >= i / 2); });
}
}
@@ -320,7 +320,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
}
TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::condition_variable doneCv;
bool a = false;
@@ -328,7 +328,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobA("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
a = true;
cv.notify_one();
@@ -339,7 +339,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobB("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
b = true;
cv.notify_one();
@@ -357,7 +357,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
clockSource().advance(Milliseconds(1));
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
doneCv.wait(lk, [&] { return a && b; });
ASSERT(a);
@@ -370,14 +370,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
size_t timesCalled = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
timesCalled++;
}
cv.notify_one();
@@ -388,7 +388,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled; });
}
@@ -397,7 +397,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// if we change the period to a longer duration, that doesn't trigger a run
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -405,7 +405,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// We actually changed the period
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -413,7 +413,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Now we hit the new cutoff
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 2ul; });
}
@@ -421,7 +421,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Haven't hit it
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 2ul);
}
@@ -430,7 +430,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// shortening triggers the period
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 3ul; });
}
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index 43cde512599..5040484b46b 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -34,8 +34,8 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/process_id.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index 05b39eff7db..44a87f93aec 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -35,8 +35,8 @@
#include <numeric>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/interruptible.h"
#include "mongo/util/scopeguard.h"
@@ -468,7 +468,7 @@ public:
//
// Leaves T unchanged if an interrupt exception is thrown while waiting for space
void push(T&& t, Interruptible* interruptible = Interruptible::notInterruptible()) {
- _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ _pushRunner([&](stdx::unique_lock<Latch>& lk) {
auto cost = _invokeCostFunc(t, lk);
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of item (" << cost
@@ -496,7 +496,7 @@ public:
void pushMany(StartIterator start,
EndIterator last,
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _pushRunner([&](stdx::unique_lock<Latch>& lk) {
size_t cost = 0;
for (auto iter = start; iter != last; ++iter) {
cost += _invokeCostFunc(*iter, lk);
@@ -521,12 +521,12 @@ public:
// Leaves T unchanged if it fails
bool tryPush(T&& t) {
return _pushRunner(
- [&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPush(lk, std::move(t)); });
+ [&](stdx::unique_lock<Latch>& lk) { return _tryPush(lk, std::move(t)); });
}
// Pops one T out of the queue
T pop(Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return _pop(lk);
});
@@ -538,7 +538,7 @@ public:
// Returns the popped values, along with the cost value of the items extracted
std::pair<std::deque<T>, size_t> popMany(
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return std::make_pair(std::exchange(_queue, {}), std::exchange(_current, 0));
});
@@ -554,7 +554,7 @@ public:
//
std::pair<std::deque<T>, size_t> popManyUpTo(
size_t budget, Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
if (_current <= budget) {
@@ -584,13 +584,13 @@ public:
// Attempts a non-blocking pop of a value
boost::optional<T> tryPop() {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPop(lk); });
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) { return _tryPop(lk); });
}
// Closes the producer end. Consumers will continue to consume until the queue is exhausted, at
// which time they will begin to throw with an interruption dbexception
void closeProducerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_producerEndClosed = true;
@@ -599,7 +599,7 @@ public:
// Closes the consumer end. This causes all callers to throw with an interruption dbexception
void closeConsumerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_consumerEndClosed = true;
_producerEndClosed = true;
@@ -608,7 +608,7 @@ public:
}
Stats getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
stats.queueDepth = _current;
stats.waitingConsumers = _consumers;
@@ -804,7 +804,7 @@ private:
template <typename Callback>
auto _pushRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkProducerClosed(lk);
@@ -815,7 +815,7 @@ private:
template <typename Callback>
auto _popRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkConsumerClosed(lk);
@@ -866,9 +866,7 @@ private:
return t;
}
- void _waitForSpace(stdx::unique_lock<stdx::mutex>& lk,
- size_t cost,
- Interruptible* interruptible) {
+ void _waitForSpace(stdx::unique_lock<Latch>& lk, size_t cost, Interruptible* interruptible) {
// We do some pre-flight checks to avoid creating a cv if we don't need one
_checkProducerClosed(lk);
@@ -885,7 +883,7 @@ private:
});
}
- void _waitForNonEmpty(stdx::unique_lock<stdx::mutex>& lk, Interruptible* interruptible) {
+ void _waitForNonEmpty(stdx::unique_lock<Latch>& lk, Interruptible* interruptible) {
typename Consumers::Waiter waiter(_consumers);
interruptible->waitForConditionOrInterrupt(_consumers.cv(), lk, [&] {
@@ -894,7 +892,7 @@ private:
});
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ProducerConsumerQueue::_mutex");
Options _options;
diff --git a/src/mongo/util/producer_consumer_queue_test.cpp b/src/mongo/util/producer_consumer_queue_test.cpp
index ba39482d0d0..d474c7bb7f9 100644
--- a/src/mongo/util/producer_consumer_queue_test.cpp
+++ b/src/mongo/util/producer_consumer_queue_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/producer_consumer_queue.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
@@ -622,7 +622,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(popManyUpToPopWithBlockingWithSpecialCost,
PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false, true>) {
typename Helper::template ProducerConsumerQueue<MoveOnly> pcq{};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t successes = 0;
size_t failures = 0;
@@ -632,10 +632,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false,
{
try {
pcq.pop(opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
successes++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueConsumed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failures++;
}
}
@@ -665,7 +665,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t success = 0;
size_t failure = 0;
@@ -675,10 +675,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
{
try {
pcq.push(MoveOnly(1), opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
success++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueEndClosed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failure++;
}
}
@@ -688,7 +688,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.pop();
while (true) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (success == 1)
break;
stdx::this_thread::yield();
@@ -744,7 +744,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool failed = false;
OperationContext* threadBopCtx = nullptr;
@@ -757,7 +757,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
auto threadB = helper.runThread("ProducerB", [&](OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
threadBopCtx = opCtx;
}
@@ -773,7 +773,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
};
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT(threadBopCtx != nullptr);
}
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index c3a56d4db21..ec927066172 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -34,9 +34,9 @@
#include <limits>
#include <queue>
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -61,12 +61,12 @@ public:
BlockingQueue(size_t size, GetSizeFn f) : _maxSize(size), _getSize(f) {}
void pushEvenIfFull(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
pushImpl_inlock(t, _getSize(t));
}
void push(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
size_t tSize = _getSize(t);
_waitForSpace_inlock(tSize, lk);
@@ -89,7 +89,7 @@ public:
return;
}
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
const auto startedEmpty = _queue.empty();
_clearing = false;
@@ -111,12 +111,12 @@ public:
* NOTE: Should only be used in a single producer case.
*/
void waitForSpace(size_t size) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_waitForSpace_inlock(size, lk);
}
bool empty() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.empty();
}
@@ -124,7 +124,7 @@ public:
* The size as measured by the size function. Default to counting each item
*/
size_t size() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _currentSize;
}
@@ -139,12 +139,12 @@ public:
* The number/count of items in the queue ( _queue.size() )
*/
size_t count() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.size();
}
void clear() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_clearing = true;
_queue = std::queue<T>();
_currentSize = 0;
@@ -153,7 +153,7 @@ public:
}
bool tryPop(T& t) {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
if (_queue.empty())
return false;
@@ -166,7 +166,7 @@ public:
}
T blockingPop() {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing)
_cvNoLongerEmpty.wait(lk);
@@ -191,7 +191,7 @@ public:
bool blockingPop(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -213,7 +213,7 @@ public:
bool blockingPeek(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -229,7 +229,7 @@ public:
// Obviously, this should only be used when you have
// only one consumer
bool peek(T& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return false;
}
@@ -242,7 +242,7 @@ public:
* Returns the item most recently added to the queue or nothing if the queue is empty.
*/
boost::optional<T> lastObjectPushed() const {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return {};
}
@@ -254,7 +254,7 @@ private:
/**
* Returns when enough space is available.
*/
- void _waitForSpace_inlock(size_t size, stdx::unique_lock<stdx::mutex>& lk) {
+ void _waitForSpace_inlock(size_t size, stdx::unique_lock<Latch>& lk) {
while (_currentSize + size > _maxSize) {
_cvNoLongerFull.wait(lk);
}
@@ -268,7 +268,7 @@ private:
_cvNoLongerEmpty.notify_one();
}
- mutable stdx::mutex _lock;
+ mutable Mutex _lock = MONGO_MAKE_LATCH("BlockingQueue::_lock");
std::queue<T> _queue;
const size_t _maxSize;
size_t _currentSize = 0;
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index da60a37fd1f..9d9e6896c03 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -157,12 +157,12 @@ public:
}
private:
- static stdx::mutex _streamMutex;
+ static stdx::mutex _streamMutex; // NOLINT
static thread_local int terminateDepth;
stdx::unique_lock<stdx::mutex> _lk;
};
-stdx::mutex MallocFreeOStreamGuard::_streamMutex;
+stdx::mutex MallocFreeOStreamGuard::_streamMutex; // NOLINT
thread_local int MallocFreeOStreamGuard::terminateDepth = 0;
// must hold MallocFreeOStreamGuard to call
diff --git a/src/mongo/util/stacktrace_windows.cpp b/src/mongo/util/stacktrace_windows.cpp
index cbf449a75d2..00e734b0555 100644
--- a/src/mongo/util/stacktrace_windows.cpp
+++ b/src/mongo/util/stacktrace_windows.cpp
@@ -122,7 +122,7 @@ public:
private:
boost::optional<HANDLE> _processHandle;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
DWORD _origOptions;
};
diff --git a/src/mongo/util/synchronized_value.h b/src/mongo/util/synchronized_value.h
index a49585b9426..28033cd7bf8 100644
--- a/src/mongo/util/synchronized_value.h
+++ b/src/mongo/util/synchronized_value.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -46,7 +46,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit update_guard(T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit update_guard(T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~update_guard() = default;
// Only move construction is permitted so that synchronized_value may return update_guard
@@ -81,7 +81,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
T& _value;
@@ -96,7 +96,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit const_update_guard(const T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit const_update_guard(const T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~const_update_guard() = default;
// Only move construction is permitted so that synchronized_value may return const_update_guard
@@ -121,7 +121,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
const T& _value;
@@ -156,7 +156,7 @@ public:
// Support assigning from the contained value
synchronized_value& operator=(const T& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = value;
}
return *this;
@@ -164,7 +164,7 @@ public:
synchronized_value& operator=(T&& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = std::move(value);
}
return *this;
@@ -174,7 +174,7 @@ public:
* Return a copy of the protected object.
*/
T get() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _value;
}
@@ -204,26 +204,26 @@ public:
bool operator==(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value == rhs._value;
}
bool operator!=(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value != rhs._value;
}
bool operator==(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value == rhs;
}
bool operator!=(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value != rhs;
}
@@ -250,12 +250,12 @@ private:
T _value;
// Mutex to guard value
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("synchronized_value::_mutex");
};
template <class T>
bool operator==(const synchronized_value<T>& lhs, const T& rhs) {
- stdx::lock_guard<stdx::mutex> lock(lhs._mutex);
+ stdx::lock_guard<Latch> lock(lhs._mutex);
return lhs._value == rhs;
}
@@ -267,7 +267,7 @@ bool operator!=(const synchronized_value<T>& lhs, const T& rhs) {
template <class T>
bool operator==(const T& lhs, const synchronized_value<T>& rhs) {
- stdx::lock_guard<stdx::mutex> lock(rhs._mutex);
+ stdx::lock_guard<Latch> lock(rhs._mutex);
return lhs == rhs._value;
}
@@ -281,8 +281,8 @@ template <class T>
bool operator==(const synchronized_value<T>& lhs, const synchronized_value<T>& rhs) {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(lhs._mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(lhs._mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(lhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return lhs._value == rhs._value;
}
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
index 3639d41efd3..9511a735f5c 100644
--- a/src/mongo/util/time_support.h
+++ b/src/mongo/util/time_support.h
@@ -36,8 +36,8 @@
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/duration.h"
namespace mongo {
diff --git a/src/mongo/util/uuid.cpp b/src/mongo/util/uuid.cpp
index 66835454a0c..d729777cf30 100644
--- a/src/mongo/util/uuid.cpp
+++ b/src/mongo/util/uuid.cpp
@@ -34,15 +34,15 @@
#include "mongo/util/uuid.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/hex.h"
namespace mongo {
namespace {
-stdx::mutex uuidGenMutex;
+Mutex uuidGenMutex;
auto uuidGen = SecureRandom::create();
// Regex to match valid version 4 UUIDs with variant bits set
@@ -100,7 +100,7 @@ UUID UUID::gen() {
int64_t randomWords[2];
{
- stdx::lock_guard<stdx::mutex> lk(uuidGenMutex);
+ stdx::lock_guard<Latch> lk(uuidGenMutex);
// Generate 128 random bits
randomWords[0] = uuidGen->nextInt64();