author    Adam Midvidy <amidvidy@gmail.com>  2015-06-17 10:27:13 -0400
committer Adam Midvidy <amidvidy@gmail.com>  2015-06-17 13:32:39 -0400
commit    449e0f2b47e32060433cb6f68d967ea53c8573d1 (patch)
tree      6e857f03a636b34ad8710e0b3e35a9115523483b
parent    cbc69e7dcb875b35e161875317bca701b48c770c (diff)
download  mongo-449e0f2b47e32060433cb6f68d967ea53c8573d1.tar.gz
SERVER-18723 boost -> stdx for mutex, unique_lock, and lock_guard
-rw-r--r--  src/mongo/client/connection_pool.cpp | 13
-rw-r--r--  src/mongo/client/connection_pool.h | 4
-rw-r--r--  src/mongo/client/connection_string.h | 9
-rw-r--r--  src/mongo/client/connection_string_connect.cpp | 4
-rw-r--r--  src/mongo/client/connpool.cpp | 18
-rw-r--r--  src/mongo/client/replica_set_monitor.cpp | 30
-rw-r--r--  src/mongo/client/replica_set_monitor_internal.h | 2
-rw-r--r--  src/mongo/client/replica_set_monitor_manager.cpp | 15
-rw-r--r--  src/mongo/client/syncclusterconnection.cpp | 4
-rw-r--r--  src/mongo/db/auth/authorization_manager.cpp | 6
-rw-r--r--  src/mongo/db/auth/authorization_manager.h | 6
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.cpp | 10
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.h | 2
-rw-r--r--  src/mongo/db/auth/internal_user_auth.cpp | 6
-rw-r--r--  src/mongo/db/auth/user_cache_invalidator_job.cpp | 8
-rw-r--r--  src/mongo/db/background.cpp | 22
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 4
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 32
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 2
-rw-r--r--  src/mongo/db/concurrency/lock_manager.h | 4
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp | 6
-rw-r--r--  src/mongo/db/concurrency/lock_state.h | 2
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp | 2
-rw-r--r--  src/mongo/db/curop.cpp | 2
-rw-r--r--  src/mongo/db/global_timestamp.cpp | 6
-rw-r--r--  src/mongo/db/index_builder.cpp | 6
-rw-r--r--  src/mongo/db/instance.cpp | 20
-rw-r--r--  src/mongo/db/query/plan_cache.cpp | 18
-rw-r--r--  src/mongo/db/query/plan_cache.h | 4
-rw-r--r--  src/mongo/db/query/query_settings.cpp | 10
-rw-r--r--  src/mongo/db/query/query_settings.h | 5
-rw-r--r--  src/mongo/db/range_deleter.cpp | 32
-rw-r--r--  src/mongo/db/range_deleter_mock_env.cpp | 24
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 42
-rw-r--r--  src/mongo/db/repl/bgsync.h | 12
-rw-r--r--  src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 6
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 6
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.h | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.cpp | 8
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_mock.h | 4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 110
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h | 10
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 14
-rw-r--r--  src/mongo/db/repl/replication_executor.cpp | 42
-rw-r--r--  src/mongo/db/repl/replication_executor.h | 8
-rw-r--r--  src/mongo/db/repl/reporter.cpp | 14
-rw-r--r--  src/mongo/db/repl/reporter.h | 2
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp | 14
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.h | 4
-rw-r--r--  src/mongo/db/repl/task_runner.cpp | 18
-rw-r--r--  src/mongo/db/repl/task_runner.h | 4
-rw-r--r--  src/mongo/db/service_context.cpp | 6
-rw-r--r--  src/mongo/db/service_context.h | 4
-rw-r--r--  src/mongo/db/stats/lock_server_status_section.cpp | 2
-rw-r--r--  src/mongo/db/stats/snapshots.cpp | 4
-rw-r--r--  src/mongo/db/storage/in_memory/in_memory_engine.cpp | 8
-rw-r--r--  src/mongo/db/storage/in_memory/in_memory_engine.h | 5
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp | 18
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.h | 5
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 10
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h | 5
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journal.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/file_allocator.cpp | 18
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap.h | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.h | 7
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_windows.cpp | 18
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 5
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 18
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 7
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp | 10
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h | 3
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp | 20
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h | 6
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 4
-rw-r--r--  src/mongo/dbtests/framework.cpp | 6
-rw-r--r--  src/mongo/dbtests/mock/mock_conn_registry.cpp | 8
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 10
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 4
-rw-r--r--  src/mongo/executor/network_interface_impl.cpp | 18
-rw-r--r--  src/mongo/executor/network_interface_impl.h | 4
-rw-r--r--  src/mongo/executor/network_interface_mock.cpp | 40
-rw-r--r--  src/mongo/executor/network_interface_mock.h | 8
-rw-r--r--  src/mongo/logger/console.cpp | 6
-rw-r--r--  src/mongo/logger/console.h | 5
-rw-r--r--  src/mongo/logger/ramlog.cpp | 8
-rw-r--r--  src/mongo/logger/ramlog.h | 13
-rw-r--r--  src/mongo/logger/rotatable_file_writer.h | 6
-rw-r--r--  src/mongo/s/catalog/catalog_cache.cpp | 6
-rw-r--r--  src/mongo/s/catalog/catalog_cache.h | 4
-rw-r--r--  src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp | 8
-rw-r--r--  src/mongo/s/catalog/legacy/catalog_manager_legacy.h | 2
-rw-r--r--  src/mongo/s/catalog/legacy/distlock.cpp | 4
-rw-r--r--  src/mongo/s/catalog/legacy/legacy_dist_lock_manager.cpp | 16
-rw-r--r--  src/mongo/s/catalog/legacy/legacy_dist_lock_manager.h | 2
-rw-r--r--  src/mongo/s/catalog/legacy/legacy_dist_lock_pinger.cpp | 20
-rw-r--r--  src/mongo/s/client/multi_host_query.cpp | 2
-rw-r--r--  src/mongo/s/client/multi_host_query.h | 10
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 6
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 3
-rw-r--r--  src/mongo/s/config.cpp | 32
-rw-r--r--  src/mongo/s/cursors.cpp | 28
-rw-r--r--  src/mongo/s/d_state.cpp | 54
-rw-r--r--  src/mongo/s/version_manager.cpp | 8
-rw-r--r--  src/mongo/scripting/engine.cpp | 4
-rw-r--r--  src/mongo/scripting/engine_v8-3.25.cpp | 14
-rw-r--r--  src/mongo/scripting/engine_v8.cpp | 14
-rw-r--r--  src/mongo/scripting/v8-3.25_utils.cpp | 22
-rw-r--r--  src/mongo/scripting/v8_deadline_monitor.h | 10
-rw-r--r--  src/mongo/scripting/v8_deadline_monitor_test.cpp | 4
-rw-r--r--  src/mongo/scripting/v8_utils.cpp | 22
-rw-r--r--  src/mongo/shell/bench.cpp | 16
-rw-r--r--  src/mongo/shell/bench.h | 6
-rw-r--r--  src/mongo/shell/clientAndShell.cpp | 2
-rw-r--r--  src/mongo/shell/dbshell.cpp | 4
-rw-r--r--  src/mongo/shell/shell_utils.cpp | 8
-rw-r--r--  src/mongo/shell/shell_utils.h | 3
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 38
-rw-r--r--  src/mongo/util/background_job_test.cpp | 4
-rw-r--r--  src/mongo/util/concurrency/mutex.h | 5
-rw-r--r--  src/mongo/util/concurrency/rwlockimpl.cpp | 32
-rw-r--r--  src/mongo/util/concurrency/synchronization.cpp | 16
-rw-r--r--  src/mongo/util/concurrency/thread_pool.cpp | 8
-rw-r--r--  src/mongo/util/concurrency/thread_pool.h | 4
-rw-r--r--  src/mongo/util/fail_point.cpp | 4
-rw-r--r--  src/mongo/util/fail_point.h | 5
-rw-r--r--  src/mongo/util/fail_point_test.cpp | 13
-rw-r--r--  src/mongo/util/net/listen.cpp | 8
-rw-r--r--  src/mongo/util/net/listen.h | 30
-rw-r--r--  src/mongo/util/net/message_port.cpp | 6
-rw-r--r--  src/mongo/util/signal_handlers_synchronous.cpp | 10
142 files changed, 778 insertions(+), 788 deletions(-)
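
The mechanical substitution below relies on MongoDB's stdx wrapper headers: call sites name stdx::mutex, stdx::lock_guard, and stdx::unique_lock, and a single header decides which implementation backs those names. What follows is a minimal sketch of that alias-header pattern, assuming the C++11 standard library types are used directly; the real mongo/stdx/mutex.h may differ in detail (for example, it may select boost types behind a configure-time macro).

    // Sketch of the alias-header pattern behind "mongo/stdx/mutex.h".
    // Illustrative only, not the verbatim MongoDB header.
    #pragma once

    #include <mutex>

    namespace mongo {
    namespace stdx {

    // Call sites spell stdx::mutex etc., so the backing implementation
    // (here, the C++11 standard library) can be swapped in one place.
    using ::std::mutex;

    using ::std::lock_guard;
    using ::std::unique_lock;

    // Lock-deferral tags, so call sites can write stdx::defer_lock
    // (as the dbhash.cpp hunk below does).
    using ::std::defer_lock_t;
    constexpr defer_lock_t defer_lock{};

    }  // namespace stdx
    }  // namespace mongo

With that header in place, a call site changes only its spelling: boost::lock_guard<boost::mutex> lk(_mutex); becomes stdx::lock_guard<stdx::mutex> lk(_mutex);. Note that the hunks in user_management_commands.cpp keep boost::timed_mutex inside stdx::unique_lock — the commit migrates only mutex, unique_lock, and lock_guard, as its title says.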
diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp
index b7ebda533aa..17628610135 100644
--- a/src/mongo/client/connection_pool.cpp
+++ b/src/mongo/client/connection_pool.cpp
@@ -30,11 +30,10 @@
#include "mongo/client/connection_pool.h"
-#include <boost/thread/lock_guard.hpp>
-
#include "mongo/client/connpool.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/internal_user_auth.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
namespace {
@@ -56,7 +55,7 @@ namespace {
}
void ConnectionPool::cleanUpOlderThan(Date_t now) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_cleanUpOlderThan_inlock(now);
}
@@ -95,7 +94,7 @@ namespace {
}
void ConnectionPool::closeAllInUseConnections() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (ConnectionList::iterator iter = _inUseConnections.begin();
iter != _inUseConnections.end();
++iter) {
@@ -126,7 +125,7 @@ namespace {
const HostAndPort& target,
Date_t now,
Milliseconds timeout) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
// Clean up connections on stale/unused hosts
_cleanUpStaleHosts_inlock(now);
@@ -196,7 +195,7 @@ namespace {
}
void ConnectionPool::releaseConnection(ConnectionList::iterator iter, const Date_t now) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (!_shouldKeepConnection(now, *iter)) {
_destroyConnection_inlock(&_inUseConnections, iter);
return;
@@ -209,7 +208,7 @@ namespace {
}
void ConnectionPool::destroyConnection(ConnectionList::iterator iter) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_destroyConnection_inlock(&_inUseConnections, iter);
}
diff --git a/src/mongo/client/connection_pool.h b/src/mongo/client/connection_pool.h
index daf36d87337..cce69e03c22 100644
--- a/src/mongo/client/connection_pool.h
+++ b/src/mongo/client/connection_pool.h
@@ -28,13 +28,13 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <map>
#include "mongo/base/disallow_copying.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/stdx/list.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -188,7 +188,7 @@ namespace mongo {
const int _messagingPortTags;
// Mutex guarding members of the connection pool
- boost::mutex _mutex;
+ stdx::mutex _mutex;
// Map from HostAndPort to idle connections.
HostConnectionMap _connections;
diff --git a/src/mongo/client/connection_string.h b/src/mongo/client/connection_string.h
index 3c5fe14a863..7b6b2f04a72 100644
--- a/src/mongo/client/connection_string.h
+++ b/src/mongo/client/connection_string.h
@@ -28,12 +28,11 @@
#pragma once
-#include <boost/thread/lock_guard.hpp>
-#include <boost/thread/mutex.hpp>
#include <string>
#include <vector>
#include "mongo/base/string_data.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
@@ -122,12 +121,12 @@ namespace mongo {
};
static void setConnectionHook( ConnectionHook* hook ){
- boost::lock_guard<boost::mutex> lk( _connectHookMutex );
+ stdx::lock_guard<stdx::mutex> lk( _connectHookMutex );
_connectHook = hook;
}
static ConnectionHook* getConnectionHook() {
- boost::lock_guard<boost::mutex> lk( _connectHookMutex );
+ stdx::lock_guard<stdx::mutex> lk( _connectHookMutex );
return _connectHook;
}
@@ -151,7 +150,7 @@ namespace mongo {
std::string _string;
std::string _setName;
- static boost::mutex _connectHookMutex;
+ static stdx::mutex _connectHookMutex;
static ConnectionHook* _connectHook;
};
} // namespace mongo
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index 599c0a5f189..2b4b6a06df3 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -43,7 +43,7 @@
namespace mongo {
- boost::mutex ConnectionString::_connectHookMutex;
+ stdx::mutex ConnectionString::_connectHookMutex;
ConnectionString::ConnectionHook* ConnectionString::_connectHook = NULL;
DBClientBase* ConnectionString::connect( std::string& errmsg, double socketTimeout ) const {
@@ -82,7 +82,7 @@ namespace mongo {
case CUSTOM: {
// Lock in case other things are modifying this at the same time
- boost::lock_guard<boost::mutex> lk( _connectHookMutex );
+ stdx::lock_guard<stdx::mutex> lk( _connectHookMutex );
// Allow the replacement of connections with other connections - useful for testing.
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 79f0aa9801c..b56bec49551 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -187,7 +187,7 @@ namespace mongo {
DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
uassert(17382, "Can't use connection pool during shutdown",
!inShutdown());
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(ident);
@@ -196,7 +196,7 @@ namespace mongo {
DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
{
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(host);
@@ -270,7 +270,7 @@ namespace mongo {
void DBConnectionPool::release(const string& host, DBClientBase *c) {
onRelease(c);
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
_pools[PoolKey(host,c->getSoTimeout())].done(this,c);
}
@@ -280,7 +280,7 @@ namespace mongo {
}
void DBConnectionPool::flush() {
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++ ) {
PoolForHost& p = i->second;
p.flush();
@@ -288,7 +288,7 @@ namespace mongo {
}
void DBConnectionPool::clear() {
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
LOG(2) << "Removing connections on all pools owned by " << _name << endl;
for (PoolMap::iterator iter = _pools.begin(); iter != _pools.end(); ++iter) {
iter->second.clear();
@@ -296,7 +296,7 @@ namespace mongo {
}
void DBConnectionPool::removeHost( const string& host ) {
- boost::lock_guard<boost::mutex> L(_mutex);
+ stdx::lock_guard<stdx::mutex> L(_mutex);
LOG(2) << "Removing connections from all pools for host: " << host << endl;
for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i ) {
const string& poolHost = i->first.ident;
@@ -348,7 +348,7 @@ namespace mongo {
BSONObjBuilder bb( b.subobjStart( "hosts" ) );
{
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
if ( i->second.numCreated() == 0 )
continue;
@@ -432,7 +432,7 @@ namespace mongo {
}
{
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
PoolForHost& pool = _pools[PoolKey(hostName, conn->getSoTimeout())];
if (pool.isBadSocketCreationTime(conn->getSockCreationMicroSec())) {
return false;
@@ -448,7 +448,7 @@ namespace mongo {
{
// we need to get the connections inside the lock
// but we can actually delete them outside
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
i->second.getStaleConnections( toDelete );
}
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 6a6063c852e..f8576782fcd 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -97,7 +97,7 @@ namespace {
virtual string name() const { return "ReplicaSetMonitorWatcher"; }
void safeGo() {
- boost::lock_guard<boost::mutex> lk( _monitorMutex );
+ stdx::lock_guard<stdx::mutex> lk( _monitorMutex );
if ( _started )
return;
@@ -111,7 +111,7 @@ namespace {
* Stops monitoring the sets and wait for the monitoring thread to terminate.
*/
void stop() {
- boost::lock_guard<boost::mutex> sl( _monitorMutex );
+ stdx::lock_guard<stdx::mutex> sl( _monitorMutex );
_stopRequested = true;
_stopRequestedCV.notify_one();
}
@@ -125,14 +125,14 @@ namespace {
// Should not be needed after SERVER-7533 gets implemented and tests start
// using it.
if (!inShutdown() && !StaticObserver::_destroyingStatics) {
- boost::unique_lock<boost::mutex> sl( _monitorMutex );
+ stdx::unique_lock<stdx::mutex> sl( _monitorMutex );
_stopRequestedCV.timed_wait(sl, boost::posix_time::seconds(10));
}
while ( !inShutdown() &&
!StaticObserver::_destroyingStatics ) {
{
- boost::lock_guard<boost::mutex> sl( _monitorMutex );
+ stdx::lock_guard<stdx::mutex> sl( _monitorMutex );
if (_stopRequested) {
break;
}
@@ -148,7 +148,7 @@ namespace {
error() << "unknown error";
}
- boost::unique_lock<boost::mutex> sl( _monitorMutex );
+ stdx::unique_lock<stdx::mutex> sl( _monitorMutex );
if (_stopRequested) {
break;
}
@@ -254,7 +254,7 @@ namespace {
HostAndPort ReplicaSetMonitor::getHostOrRefresh(const ReadPreferenceSetting& criteria) {
{
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
HostAndPort out = _state->getMatchingHost(criteria);
if (!out.empty())
return out;
@@ -283,7 +283,7 @@ namespace {
}
Refresher ReplicaSetMonitor::startOrContinueRefresh() {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
Refresher out(_state);
DEV _state->checkInvariants();
@@ -291,7 +291,7 @@ namespace {
}
void ReplicaSetMonitor::failedHost(const HostAndPort& host) {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
if (node)
node->markFailed();
@@ -299,19 +299,19 @@ namespace {
}
bool ReplicaSetMonitor::isPrimary(const HostAndPort& host) const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isMaster : false;
}
bool ReplicaSetMonitor::isHostUp(const HostAndPort& host) const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isUp : false;
}
int ReplicaSetMonitor::getConsecutiveFailedScans() const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
return _state->consecutiveFailedScans;
}
@@ -321,12 +321,12 @@ namespace {
}
std::string ReplicaSetMonitor::getServerAddress() const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
return _state->getServerAddress();
}
bool ReplicaSetMonitor::contains(const HostAndPort& host) const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
return _state->seedNodes.count(host);
}
@@ -357,7 +357,7 @@ namespace {
// TODO move to correct order with non-statics before pushing
void ReplicaSetMonitor::appendInfo(BSONObjBuilder& bsonObjBuilder) const {
- boost::lock_guard<boost::mutex> lk(_state->mutex);
+ stdx::lock_guard<stdx::mutex> lk(_state->mutex);
// NOTE: the format here must be consistent for backwards compatibility
BSONArrayBuilder hosts(bsonObjBuilder.subarrayStart("hosts"));
@@ -682,7 +682,7 @@ namespace {
}
HostAndPort Refresher::_refreshUntilMatches(const ReadPreferenceSetting* criteria) {
- boost::unique_lock<boost::mutex> lk(_set->mutex);
+ stdx::unique_lock<stdx::mutex> lk(_set->mutex);
while (true) {
if (criteria) {
HostAndPort out = _set->getMatchingHost(*criteria);
diff --git a/src/mongo/client/replica_set_monitor_internal.h b/src/mongo/client/replica_set_monitor_internal.h
index f840c2a1f65..a0168d21dcf 100644
--- a/src/mongo/client/replica_set_monitor_internal.h
+++ b/src/mongo/client/replica_set_monitor_internal.h
@@ -162,7 +162,7 @@ namespace mongo {
*/
void checkInvariants() const;
- boost::mutex mutex; // must hold this to access any other member or method (except name).
+ stdx::mutex mutex; // must hold this to access any other member or method (except name).
// If Refresher::getNextStep returns WAIT, you should wait on the condition_variable,
// releasing mutex. It will be notified when either getNextStep will return something other
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index ac1f6b8e6e7..393ded3cee1 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -30,11 +30,10 @@
#include "mongo/client/replica_set_monitor_manager.h"
-#include <boost/thread/lock_guard.hpp>
-
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/connection_string.h"
#include "mongo/client/replica_set_monitor.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/map_util.h"
namespace mongo {
@@ -49,7 +48,7 @@ namespace mongo {
ReplicaSetMonitorManager::~ReplicaSetMonitorManager() = default;
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getMonitor(StringData setName) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return mapFindWithDefault(_monitors, setName, shared_ptr<ReplicaSetMonitor>());
}
@@ -58,7 +57,7 @@ namespace mongo {
ReplicaSetMonitorManager::getOrCreateMonitor(const ConnectionString& connStr) {
invariant(connStr.type() == ConnectionString::SET);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
shared_ptr<ReplicaSetMonitor>& monitor = _monitors[connStr.getSetName()];
if (!monitor) {
@@ -74,7 +73,7 @@ namespace mongo {
vector<string> ReplicaSetMonitorManager::getAllSetNames() {
vector<string> allNames;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (const auto& entry : _monitors) {
allNames.push_back(entry.first);
@@ -84,7 +83,7 @@ namespace mongo {
}
void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
ReplicaSetMonitorsMap::const_iterator it = _monitors.find(setName);
if (it != _monitors.end()) {
@@ -93,14 +92,14 @@ namespace mongo {
}
void ReplicaSetMonitorManager::removeAllMonitors() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
// Reset the StringMap, which will release all registered monitors
_monitors = ReplicaSetMonitorsMap();
}
void ReplicaSetMonitorManager::report(BSONObjBuilder* builder) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (const auto& monitorPair : _monitors) {
BSONObjBuilder monitorInfo(builder->subobjStart(monitorPair.first));
diff --git a/src/mongo/client/syncclusterconnection.cpp b/src/mongo/client/syncclusterconnection.cpp
index 851dd0e9b11..f70b982b291 100644
--- a/src/mongo/client/syncclusterconnection.cpp
+++ b/src/mongo/client/syncclusterconnection.cpp
@@ -586,7 +586,7 @@ namespace mongo {
int SyncClusterConnection::_lockType( const string& name ) {
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
map<string,int>::iterator i = _lockTypes.find( name );
if ( i != _lockTypes.end() )
return i->second;
@@ -597,7 +597,7 @@ namespace mongo {
int lockType = info["lockType"].numberInt();
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_lockTypes[name] = lockType;
return lockType;
}
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index d9f9fa6660c..016d8da33c6 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -33,7 +33,6 @@
#include "mongo/db/auth/authorization_manager.h"
#include <boost/bind.hpp>
-#include <boost/thread/mutex.hpp>
#include <memory>
#include <string>
#include <vector>
@@ -58,6 +57,7 @@
#include "mongo/platform/compiler.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/stdx/memory.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -248,7 +248,7 @@ namespace mongo {
OID _startGeneration;
bool _isThisGuardInFetchPhase;
AuthorizationManager* _authzManager;
- boost::unique_lock<boost::mutex> _lock;
+ stdx::unique_lock<stdx::mutex> _lock;
};
AuthorizationManager::AuthorizationManager(
@@ -312,7 +312,7 @@ namespace mongo {
}
bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) {
- boost::unique_lock<boost::mutex> lk(_privilegeDocsExistMutex);
+ stdx::unique_lock<stdx::mutex> lk(_privilegeDocsExistMutex);
if (_privilegeDocsExist) {
// If we know that a user exists, don't re-check.
return true;
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index 009f8e8130b..d168fdf8eec 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -29,7 +29,6 @@
#pragma once
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <memory>
#include <string>
@@ -47,6 +46,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -360,7 +360,7 @@ namespace mongo {
bool _privilegeDocsExist;
// Protects _privilegeDocsExist
- mutable boost::mutex _privilegeDocsExistMutex;
+ mutable stdx::mutex _privilegeDocsExistMutex;
std::unique_ptr<AuthzManagerExternalState> _externalState;
@@ -400,7 +400,7 @@ namespace mongo {
* Protects _userCache, _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
* via CacheGuard.
*/
- boost::mutex _cacheMutex;
+ stdx::mutex _cacheMutex;
/**
* Condition used to signal that it is OK for another CacheGuard to enter a fetch phase.
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index ae8e52ccf46..e1f4b8e0301 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -170,7 +170,7 @@ namespace {
PrivilegeVector allPrivileges;
bool isRoleGraphInconsistent;
{
- boost::lock_guard<boost::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
isRoleGraphInconsistent = _roleGraphState == roleGraphStateConsistent;
for (size_t i = 0; i < directRoles.size(); ++i) {
const RoleName& role(directRoles[i]);
@@ -237,7 +237,7 @@ namespace {
Status AuthzManagerExternalStateLocal::getRoleDescription(const RoleName& roleName,
bool showPrivileges,
BSONObj* result) {
- boost::lock_guard<boost::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
return _getRoleDescription_inlock(roleName, showPrivileges, result);
}
@@ -301,7 +301,7 @@ namespace {
bool showPrivileges,
bool showBuiltinRoles,
vector<BSONObj>* result) {
- boost::lock_guard<boost::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname);
it.more(); it.next()) {
@@ -336,7 +336,7 @@ namespace {
} // namespace
Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* txn) {
- boost::lock_guard<boost::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
+ stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
_roleGraphState = roleGraphStateInitial;
_roleGraph = RoleGraph();
@@ -395,7 +395,7 @@ namespace {
}
virtual void commit() {
- boost::lock_guard<boost::mutex> lk(_externalState->_roleGraphMutex);
+ stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
Status status = _externalState->_roleGraph.handleLogOp(_op.c_str(),
NamespaceString(_ns.c_str()),
_o,
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 3233233803c..f8243aff00e 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -138,7 +138,7 @@ namespace mongo {
/**
* Guards _roleGraphState and _roleGraph.
*/
- boost::mutex _roleGraphMutex;
+ stdx::mutex _roleGraphMutex;
};
} // namespace mongo
diff --git a/src/mongo/db/auth/internal_user_auth.cpp b/src/mongo/db/auth/internal_user_auth.cpp
index f5a0c4029a1..6c8190845ea 100644
--- a/src/mongo/db/auth/internal_user_auth.cpp
+++ b/src/mongo/db/auth/internal_user_auth.cpp
@@ -49,7 +49,7 @@ namespace mongo {
// guarded by the authParams mutex
static BSONObj authParams;
- static boost::mutex authParamMutex;
+ static stdx::mutex authParamMutex;
bool isInternalAuthSet() {
return authParamsSet;
@@ -59,7 +59,7 @@ namespace mongo {
if (!isInternalAuthSet()) {
authParamsSet = true;
}
- boost::lock_guard<boost::mutex> lk(authParamMutex);
+ stdx::lock_guard<stdx::mutex> lk(authParamMutex);
if (authParamsIn["mechanism"].String() != "SCRAM-SHA-1") {
authParams = authParamsIn.copy();
@@ -83,7 +83,7 @@ namespace mongo {
return BSONObj();
}
- boost::lock_guard<boost::mutex> lk(authParamMutex);
+ stdx::lock_guard<stdx::mutex> lk(authParamMutex);
return authParams.copy();
}
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 0989c05a354..557214e0fd7 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -31,7 +31,6 @@
#include "mongo/db/auth/user_cache_invalidator_job.h"
-#include <boost/thread/mutex.hpp>
#include <string>
#include "mongo/base/status.h"
@@ -43,6 +42,7 @@
#include "mongo/db/server_parameters.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/grid.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
@@ -53,7 +53,7 @@ namespace {
// How often to check with the config servers whether authorization information has changed.
int userCacheInvalidationIntervalSecs = 30; // 30 second default
- boost::mutex invalidationIntervalMutex;
+ stdx::mutex invalidationIntervalMutex;
boost::condition_variable invalidationIntervalChangedCondition;
Date_t lastInvalidationTime;
@@ -81,7 +81,7 @@ namespace {
using ExportedServerParameter<int>::set;
virtual Status set( const int& newValue ) {
- boost::unique_lock<boost::mutex> lock(invalidationIntervalMutex);
+ stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
Status status = ExportedServerParameter<int>::set(newValue);
invalidationIntervalChangedCondition.notify_all();
return status;
@@ -134,7 +134,7 @@ namespace {
lastInvalidationTime = Date_t::now();
while (true) {
- boost::unique_lock<boost::mutex> lock(invalidationIntervalMutex);
+ stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
Date_t now = Date_t::now();
while (now < sleepUntil) {
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index 94969dba15d..ffa053bc7e6 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -55,7 +55,7 @@ namespace {
void recordBegin();
int recordEnd();
- void awaitNoBgOps(boost::unique_lock<boost::mutex>& lk);
+ void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
int getOpsInProgCount() const { return _opsInProgCount; }
@@ -67,7 +67,7 @@ namespace {
typedef StringMap<std::shared_ptr<BgInfo> > BgInfoMap;
typedef BgInfoMap::const_iterator BgInfoMapIterator;
- boost::mutex m;
+ stdx::mutex m;
BgInfoMap dbsInProg;
BgInfoMap nsInProg;
@@ -84,7 +84,7 @@ namespace {
return _opsInProgCount;
}
- void BgInfo::awaitNoBgOps(boost::unique_lock<boost::mutex>& lk) {
+ void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
while (_opsInProgCount > 0)
_noOpsInProg.wait(lk);
}
@@ -105,7 +105,7 @@ namespace {
}
void awaitNoBgOps(
- boost::unique_lock<boost::mutex>& lk,
+ stdx::unique_lock<stdx::mutex>& lk,
BgInfoMap* bgiMap,
StringData key) {
@@ -118,12 +118,12 @@ namespace {
} // namespace
bool BackgroundOperation::inProgForDb(StringData db) {
- boost::lock_guard<boost::mutex> lk(m);
+ stdx::lock_guard<stdx::mutex> lk(m);
return dbsInProg.find(db) != dbsInProg.end();
}
bool BackgroundOperation::inProgForNs(StringData ns) {
- boost::lock_guard<boost::mutex> lk(m);
+ stdx::lock_guard<stdx::mutex> lk(m);
return nsInProg.find(ns) != nsInProg.end();
}
@@ -142,29 +142,29 @@ namespace {
}
void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
- boost::unique_lock<boost::mutex> lk(m);
+ stdx::unique_lock<stdx::mutex> lk(m);
awaitNoBgOps(lk, &dbsInProg, db);
}
void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
- boost::unique_lock<boost::mutex> lk(m);
+ stdx::unique_lock<stdx::mutex> lk(m);
awaitNoBgOps(lk, &nsInProg, ns);
}
BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- boost::lock_guard<boost::mutex> lk(m);
+ stdx::lock_guard<stdx::mutex> lk(m);
recordBeginAndInsert(&dbsInProg, _ns.db());
recordBeginAndInsert(&nsInProg, _ns.ns());
}
BackgroundOperation::~BackgroundOperation() {
- boost::lock_guard<boost::mutex> lk(m);
+ stdx::lock_guard<stdx::mutex> lk(m);
recordEndAndRemove(&dbsInProg, _ns.db());
recordEndAndRemove(&nsInProg, _ns.ns());
}
void BackgroundOperation::dump(std::ostream& ss) {
- boost::lock_guard<boost::mutex> lk(m);
+ stdx::lock_guard<stdx::mutex> lk(m);
if( nsInProg.size() ) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for( BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i )
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index f7cd6330366..4dc2d8527f6 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -78,7 +78,7 @@ namespace mongo {
Database* db,
const std::string& fullCollectionName,
bool* fromCache) {
- boost::unique_lock<boost::mutex> cachedHashedLock(_cachedHashedMutex, boost::defer_lock);
+ stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
if ( isCachable( fullCollectionName ) ) {
cachedHashedLock.lock();
@@ -232,7 +232,7 @@ namespace mongo {
}
void commit() {
- boost::lock_guard<boost::mutex> lk( _dCmd->_cachedHashedMutex );
+ stdx::lock_guard<stdx::mutex> lk( _dCmd->_cachedHashedMutex );
_dCmd->_cachedHashed.erase(_ns);
}
void rollback() { }
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index c4248574f0e..21b0313d5b1 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/commands/user_management_commands.h"
-#include <boost/thread/mutex.hpp>
#include <string>
#include <vector>
@@ -64,6 +63,7 @@
#include "mongo/db/service_context.h"
#include "mongo/platform/unordered_set.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/ssl_manager.h"
@@ -754,7 +754,7 @@ namespace {
return appendCommandStatus(result, status);
}
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -874,7 +874,7 @@ namespace {
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -948,7 +948,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1029,7 +1029,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1099,7 +1099,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1184,7 +1184,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1443,7 +1443,7 @@ namespace {
roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1537,7 +1537,7 @@ namespace {
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1614,7 +1614,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1737,7 +1737,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1885,7 +1885,7 @@ namespace {
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -1969,7 +1969,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -2073,7 +2073,7 @@ namespace {
string& errmsg,
BSONObjBuilder& result) {
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -2246,7 +2246,7 @@ namespace {
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
@@ -2897,7 +2897,7 @@ namespace {
}
ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- boost::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ stdx::unique_lock<boost::timed_mutex> lk(getAuthzDataMutex(serviceContext),
authzDataMutexAcquisitionTimeout);
if (!lk) {
return appendCommandStatus(
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index ddee7e74e33..1851e84dcc2 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -183,7 +183,7 @@ namespace {
}
namespace {
- boost::mutex oplogSerialization; // for OplogIntentWriteLock
+ stdx::mutex oplogSerialization; // for OplogIntentWriteLock
} // namespace
Lock::OplogIntentWriteLock::OplogIntentWriteLock(Locker* lockState)
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index 991fe8fbd77..a73d57232f3 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -29,17 +29,17 @@
#pragma once
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <deque>
+#include "mongo/config.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/concurrency/lock_request_list.h"
-#include "mongo/config.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/compiler.h"
#include "mongo/platform/cstdint.h"
#include "mongo/platform/unordered_map.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index b2047138669..6c12a8ae1b1 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -227,7 +227,7 @@ namespace {
}
LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while (_result == LOCK_INVALID) {
if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
// Timeout
@@ -239,7 +239,7 @@ namespace {
}
void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
invariant(_result == LOCK_INVALID);
_result = result;
@@ -778,7 +778,7 @@ namespace {
bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
if (!isLocked()) return false;
- boost::lock_guard<SpinLock> lk(_lock);
+ stdx::lock_guard<SpinLock> lk(_lock);
LockRequestsMap::ConstIterator it = _requests.begin();
while (!it.finished()) {
if (it->mode == MODE_X || it->mode == MODE_S) {
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 8ea914ffbeb..b179ae28e80 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -62,7 +62,7 @@ namespace mongo {
virtual void notify(ResourceId resId, LockResult result);
// These two go together to implement the conditional variable pattern.
- boost::mutex _mutex;
+ stdx::mutex _mutex;
boost::condition_variable _cond;
// Result from the last call to notify
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index cb9aab0172a..54f305f20df 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -260,7 +260,7 @@ namespace {
TEST(Locker, PerformanceBoostSharedMutex) {
for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
- boost::mutex mtx;
+ stdx::mutex mtx;
// Do some warm-up loops
for (int i = 0; i < 1000; i++) {
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 14ea13e7fed..3da1f2427e4 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -74,7 +74,7 @@ namespace mongo {
else {
_opCtx = opCtx;
}
- boost::lock_guard<Client> lk(*_opCtx->getClient());
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
push_nolock(curOp);
}
diff --git a/src/mongo/db/global_timestamp.cpp b/src/mongo/db/global_timestamp.cpp
index f6cf646ba7e..3e18ad3e2aa 100644
--- a/src/mongo/db/global_timestamp.cpp
+++ b/src/mongo/db/global_timestamp.cpp
@@ -51,17 +51,17 @@ namespace {
namespace mongo {
void setGlobalTimestamp(const Timestamp& newTime) {
- boost::lock_guard<boost::mutex> lk(globalTimestampMutex);
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
globalTimestamp = newTime;
}
Timestamp getLastSetTimestamp() {
- boost::lock_guard<boost::mutex> lk(globalTimestampMutex);
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
return globalTimestamp;
}
Timestamp getNextGlobalTimestamp() {
- boost::lock_guard<boost::mutex> lk(globalTimestampMutex);
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
const unsigned now = (unsigned) time(0);
const unsigned globalSecs = globalTimestamp.getSecs();
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index e5939343265..ae60503cf7b 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -56,11 +56,11 @@ namespace {
// The bool is 'true' when a new background index has started in a new thread but the
// parent thread has not yet synchronized with it.
bool _bgIndexStarting(false);
- boost::mutex _bgIndexStartingMutex;
+ stdx::mutex _bgIndexStartingMutex;
boost::condition_variable _bgIndexStartingCondVar;
void _setBgIndexStarting() {
- boost::lock_guard<boost::mutex> lk(_bgIndexStartingMutex);
+ stdx::lock_guard<stdx::mutex> lk(_bgIndexStartingMutex);
invariant(_bgIndexStarting == false);
_bgIndexStarting = true;
_bgIndexStartingCondVar.notify_one();
@@ -111,7 +111,7 @@ namespace {
}
void IndexBuilder::waitForBgIndexStarting() {
- boost::unique_lock<boost::mutex> lk(_bgIndexStartingMutex);
+ stdx::unique_lock<stdx::mutex> lk(_bgIndexStartingMutex);
while (_bgIndexStarting == false) {
_bgIndexStartingCondVar.wait(lk);
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 3cb3504c24a..6bbacc43242 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -1264,7 +1264,7 @@ namespace {
// Ensures shutdown is single threaded.
// Lock Ordering:
// No restrictions
- boost::mutex shutdownLock;
+ stdx::mutex shutdownLock;
void signalShutdown() {
// Notify all threads shutdown has started
@@ -1276,7 +1276,7 @@ namespace {
shutdownInProgress.fetchAndAdd(1);
// Grab the shutdown lock to prevent concurrent callers
- boost::lock_guard<boost::mutex> lockguard(shutdownLock);
+ stdx::lock_guard<stdx::mutex> lockguard(shutdownLock);
// Global storage engine may not be started in all cases before we exit
if (getGlobalServiceContext()->getGlobalStorageEngine() == NULL) {
@@ -1371,37 +1371,37 @@ namespace {
}
int DiagLog::setLevel( int newLevel ) {
- boost::lock_guard<boost::mutex> lk(mutex);
+ stdx::lock_guard<stdx::mutex> lk(mutex);
int old = level;
log() << "diagLogging level=" << newLevel << endl;
- if( f == 0 ) {
+ if( f == 0 ) {
openFile();
}
level = newLevel; // must be done AFTER f is set
return old;
}
-
+
void DiagLog::flush() {
if ( level ) {
log() << "flushing diag log" << endl;
- boost::lock_guard<boost::mutex> lk(mutex);
+ stdx::lock_guard<stdx::mutex> lk(mutex);
f->flush();
}
}
-
+
void DiagLog::writeop(char *data,int len) {
if ( level & 1 ) {
- boost::lock_guard<boost::mutex> lk(mutex);
+ stdx::lock_guard<stdx::mutex> lk(mutex);
f->write(data,len);
}
}
-
+
void DiagLog::readop(char *data, int len) {
if ( level & 2 ) {
bool log = (level & 4) == 0;
OCCASIONALLY log = true;
if ( log ) {
- boost::lock_guard<boost::mutex> lk(mutex);
+ stdx::lock_guard<stdx::mutex> lk(mutex);
verify( f );
f->write(data,len);
}
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 0f2871e2f29..b79c2e8f659 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -581,7 +581,7 @@ namespace {
entry->sort = pq.getSort().getOwned();
entry->projection = pq.getProj().getOwned();
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
if (NULL != evictedEntry.get()) {
@@ -597,7 +597,7 @@ namespace {
PlanCacheKey key = computeKey(query);
verify(crOut);
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -617,7 +617,7 @@ namespace {
std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);
PlanCacheKey ck = computeKey(cq);
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(ck, &entry);
if (!cacheStatus.isOK()) {
@@ -634,12 +634,12 @@ namespace {
}
Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
return _cache.remove(computeKey(canonicalQuery));
}
void PlanCache::clear() {
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
_cache.clear();
_writeOperations.store(0);
}
@@ -656,7 +656,7 @@ namespace {
PlanCacheKey key = computeKey(query);
verify(entryOut);
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -670,7 +670,7 @@ namespace {
}
std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
std::vector<PlanCacheEntry*> entries;
typedef std::list< std::pair<PlanCacheKey, PlanCacheEntry*> >::const_iterator ConstIterator;
for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) {
@@ -682,12 +682,12 @@ namespace {
}
bool PlanCache::contains(const CanonicalQuery& cq) const {
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
return _cache.hasKey(computeKey(cq));
}
size_t PlanCache::size() const {
- boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
return _cache.size();
}
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 488180537e4..3bc1e474365 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -30,7 +30,6 @@
#include <set>
#include <boost/optional/optional.hpp>
-#include <boost/thread/mutex.hpp>
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
@@ -39,6 +38,7 @@
#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -400,7 +400,7 @@ namespace mongo {
LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
// Protects _cache.
- mutable boost::mutex _cacheMutex;
+ mutable stdx::mutex _cacheMutex;
// Counter for write notifications since initialization or last clear() invocation. Starts
// at 0.
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index 6c12dbcc069..c6b2f34fcb8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -86,7 +86,7 @@ namespace mongo {
AllowedIndices** allowedIndicesOut) const {
invariant(allowedIndicesOut);
- boost::lock_guard<boost::mutex> cacheLock(_mutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -104,7 +104,7 @@ namespace mongo {
}
std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
- boost::lock_guard<boost::mutex> cacheLock(_mutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
vector<AllowedIndexEntry*> entries;
for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin(); i != _allowedIndexEntryMap.end(); ++i) {
AllowedIndexEntry* entry = i->second;
@@ -122,7 +122,7 @@ namespace mongo {
const BSONObj& projection = lpq.getProj();
AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
- boost::lock_guard<boost::mutex> cacheLock(_mutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
// Replace existing entry.
if (i != _allowedIndexEntryMap.end()) {
@@ -133,7 +133,7 @@ namespace mongo {
}
void QuerySettings::removeAllowedIndices(const PlanCacheKey& key) {
- boost::lock_guard<boost::mutex> cacheLock(_mutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -148,7 +148,7 @@ namespace mongo {
}
void QuerySettings::clearAllowedIndices() {
- boost::lock_guard<boost::mutex> cacheLock(_mutex);
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
_clear();
}
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index a65ea88d901..29449167580 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -30,13 +30,14 @@
#include <string>
#include <vector>
-#include <boost/thread/mutex.hpp>
+
#include "mongo/base/disallow_copying.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/platform/unordered_map.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -141,7 +142,7 @@ namespace mongo {
/**
* Protects data in query settings.
*/
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
};
} // namespace mongo
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 75b1a9c38a7..55e32533aaa 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -199,7 +199,7 @@ namespace mongo {
void RangeDeleter::stopWorkers() {
{
- boost::lock_guard<boost::mutex> sl(_stopMutex);
+ stdx::lock_guard<stdx::mutex> sl(_stopMutex);
_stopRequested = true;
}
@@ -207,7 +207,7 @@ namespace mongo {
_worker->join();
}
- boost::unique_lock<boost::mutex> sl(_queueMutex);
+ stdx::unique_lock<stdx::mutex> sl(_queueMutex);
while (_deletesInProgress > 0) {
_nothingInProgressCV.wait(sl);
}
@@ -229,7 +229,7 @@ namespace mongo {
toDelete->notifyDone = notifyDone;
{
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
if (_stopRequested) {
*errMsg = "deleter is already stopped.";
return false;
@@ -252,7 +252,7 @@ namespace mongo {
logCursorsWaiting(toDelete.get());
{
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
if (toDelete->cursorsToWait.empty()) {
toDelete->stats.queueEndTS = jsTime();
@@ -321,7 +321,7 @@ namespace {
NSMinMax deleteRange(ns, min, max);
{
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
if (!canEnqueue_inlock(ns, min, max, errMsg)) {
return false;
}
@@ -363,7 +363,7 @@ namespace {
if (stopRequested()) {
*errMsg = "deleter was stopped.";
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
_deleteSet.erase(&deleteRange);
_deletesInProgress--;
@@ -396,7 +396,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
_deleteSet.erase(&deleteRange);
_deletesInProgress--;
@@ -414,7 +414,7 @@ namespace {
stats->clear();
stats->reserve(kDeleteJobsHistory);
- boost::lock_guard<boost::mutex> sl(_statsHistoryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
for (std::deque<DeleteJobStats*>::const_iterator it = _statsHistory.begin();
it != _statsHistory.end(); ++it) {
stats->push_back(new DeleteJobStats(**it));
@@ -422,7 +422,7 @@ namespace {
}
BSONObj RangeDeleter::toBSON() const {
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
BSONObjBuilder builder;
@@ -453,7 +453,7 @@ namespace {
RangeDeleteEntry* nextTask = NULL;
{
- boost::unique_lock<boost::mutex> sl(_queueMutex);
+ stdx::unique_lock<stdx::mutex> sl(_queueMutex);
while (_taskQueue.empty()) {
_taskQueueNotEmptyCV.timed_wait(
sl, duration::milliseconds(kNotEmptyTimeoutMillis));
@@ -539,7 +539,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
NSMinMax setEntry(nextTask->options.range.ns,
nextTask->options.range.minKey,
@@ -574,27 +574,27 @@ namespace {
}
bool RangeDeleter::stopRequested() const {
- boost::lock_guard<boost::mutex> sl(_stopMutex);
+ stdx::lock_guard<stdx::mutex> sl(_stopMutex);
return _stopRequested;
}
size_t RangeDeleter::getTotalDeletes() const {
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
return _deleteSet.size();
}
size_t RangeDeleter::getPendingDeletes() const {
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
return _notReadyQueue.size() + _taskQueue.size();
}
size_t RangeDeleter::getDeletesInProgress() const {
- boost::lock_guard<boost::mutex> sl(_queueMutex);
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
return _deletesInProgress;
}
void RangeDeleter::recordDelStats(DeleteJobStats* newStat) {
- boost::lock_guard<boost::mutex> sl(_statsHistoryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
if (_statsHistory.size() == kDeleteJobsHistory) {
delete _statsHistory.front();
_statsHistory.pop_front();
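RangeDeleter mixes the two lock types deliberately: lock_guard for plain critical sections, unique_lock wherever it waits on _nothingInProgressCV or _taskQueueNotEmptyCV. A standalone sketch of why the wait sites need unique_lock (the names are illustrative stand-ins for the members above):

    #include <condition_variable>
    #include <mutex>

    std::mutex queueMutex;                        // stands in for _queueMutex
    std::condition_variable nothingInProgressCV;  // stands in for _nothingInProgressCV
    int deletesInProgress = 0;

    void waitForNoDeletes() {
        // wait() must unlock the mutex while blocked and relock it on wakeup,
        // so it needs a lock object it can manipulate; an immovable lock_guard
        // cannot be used here.
        std::unique_lock<std::mutex> sl(queueMutex);
        while (deletesInProgress > 0) {
            nothingInProgressCV.wait(sl);
        }
    }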
diff --git a/src/mongo/db/range_deleter_mock_env.cpp b/src/mongo/db/range_deleter_mock_env.cpp
index 97786c83ee7..f831c746ceb 100644
--- a/src/mongo/db/range_deleter_mock_env.cpp
+++ b/src/mongo/db/range_deleter_mock_env.cpp
@@ -61,47 +61,47 @@ namespace mongo {
}
void RangeDeleterMockEnv::addCursorId(StringData ns, CursorId id) {
- boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
_cursorMap[ns.toString()].insert(id);
}
void RangeDeleterMockEnv::removeCursorId(StringData ns, CursorId id) {
- boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
_cursorMap[ns.toString()].erase(id);
}
void RangeDeleterMockEnv::pauseDeletes() {
- boost::lock_guard<boost::mutex> sl(_pauseDeleteMutex);
+ stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
_pauseDelete = true;
}
void RangeDeleterMockEnv::resumeOneDelete() {
- boost::lock_guard<boost::mutex> sl(_pauseDeleteMutex);
+ stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
_pauseDelete = false;
_pausedCV.notify_one();
}
void RangeDeleterMockEnv::waitForNthGetCursor(uint64_t nthCall) {
- boost::unique_lock<boost::mutex> sl(_envStatMutex);
+ stdx::unique_lock<stdx::mutex> sl(_envStatMutex);
while (_getCursorsCallCount < nthCall) {
_cursorsCallCountUpdatedCV.wait(sl);
}
}
void RangeDeleterMockEnv::waitForNthPausedDelete(uint64_t nthPause) {
- boost::unique_lock<boost::mutex> sl(_pauseDeleteMutex);
+ stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
while (_pausedCount < nthPause) {
_pausedDeleteChangeCV.wait(sl);
}
}
bool RangeDeleterMockEnv::deleteOccured() const {
- boost::lock_guard<boost::mutex> sl(_deleteListMutex);
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
return !_deleteList.empty();
}
DeletedRange RangeDeleterMockEnv::getLastDelete() const {
- boost::lock_guard<boost::mutex> sl(_deleteListMutex);
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
return _deleteList.back();
}
@@ -111,7 +111,7 @@ namespace mongo {
string* errMsg) {
{
- boost::unique_lock<boost::mutex> sl(_pauseDeleteMutex);
+ stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
bool wasInitiallyPaused = _pauseDelete;
if (_pauseDelete) {
@@ -127,7 +127,7 @@ namespace mongo {
}
{
- boost::lock_guard<boost::mutex> sl(_deleteListMutex);
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
DeletedRange entry;
entry.ns = taskDetails.options.range.ns;
@@ -144,13 +144,13 @@ namespace mongo {
void RangeDeleterMockEnv::getCursorIds(
OperationContext* txn, StringData ns, set<CursorId>* in) {
{
- boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
const set<CursorId>& _cursors = _cursorMap[ns.toString()];
std::copy(_cursors.begin(), _cursors.end(), inserter(*in, in->begin()));
}
{
- boost::lock_guard<boost::mutex> sl(_envStatMutex);
+ stdx::lock_guard<stdx::mutex> sl(_envStatMutex);
_getCursorsCallCount++;
_cursorsCallCountUpdatedCV.notify_one();
}
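The mock environment's pauseDeletes/resumeOneDelete/waitForNthPausedDelete trio is a small pause gate built from one mutex, one condition variable, and a flag. A condensed, hypothetical sketch of the same shape (not the real RangeDeleterMockEnv interface):

    #include <condition_variable>
    #include <mutex>

    class PauseGate {
    public:
        void pause() {
            std::lock_guard<std::mutex> lk(_m);
            _paused = true;
        }
        void resumeOne() {
            std::lock_guard<std::mutex> lk(_m);
            _paused = false;
            _cv.notify_one();   // wake exactly one paused waiter
        }
        void waitWhilePaused() {
            std::unique_lock<std::mutex> lk(_m);
            while (_paused) {
                _cv.wait(lk);
            }
        }
    private:
        std::mutex _m;
        std::condition_variable _cv;
        bool _paused = false;
    };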
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index a9c5e068bc7..5317ab36305 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -68,7 +68,7 @@ namespace {
MONGO_FP_DECLARE(rsBgSyncProduce);
BackgroundSync* BackgroundSync::s_instance = 0;
- boost::mutex BackgroundSync::s_mutex;
+ stdx::mutex BackgroundSync::s_mutex;
// The number and time spent reading batches off the network
static TimerStats getmoreReplStats;
@@ -119,7 +119,7 @@ namespace {
}
BackgroundSync* BackgroundSync::get() {
- boost::unique_lock<boost::mutex> lock(s_mutex);
+ stdx::unique_lock<stdx::mutex> lock(s_mutex);
if (s_instance == NULL && !inShutdown()) {
s_instance = new BackgroundSync();
}
@@ -127,7 +127,7 @@ namespace {
}
void BackgroundSync::shutdown() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
// Clear the buffer in case the producerThread is waiting in push() due to a full queue.
invariant(inShutdown());
@@ -140,7 +140,7 @@ namespace {
}
void BackgroundSync::notify(OperationContext* txn) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
// If all ops in the buffer have been applied, unblock waitForRepl (if it's waiting)
if (_buffer.empty()) {
@@ -206,7 +206,7 @@ namespace {
// this oplog reader does not do a handshake because we don't want the server it's syncing
// from to track how far it has synced
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
@@ -232,7 +232,7 @@ namespace {
// find a target to sync from the last optime fetched
OpTime lastOpTimeFetched;
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
_syncSourceHost = HostAndPort();
}
@@ -240,7 +240,7 @@ namespace {
_syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
// no server found
if (_syncSourceReader.getHost().empty()) {
lock.unlock();
@@ -311,7 +311,7 @@ namespace {
// If there is still no data from upstream, check a few more things
// and then loop back for another pass at getting more data
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_pause) {
return;
}
@@ -341,7 +341,7 @@ namespace {
opsReadStats.increment();
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_appliedBuffer = false;
}
@@ -354,7 +354,7 @@ namespace {
_buffer.push(o);
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_lastFetchedHash = o["h"].numberLong();
_lastOpTimeFetched = extractOpTime(o);
LOG(3) << "lastOpTimeFetched: " << _lastOpTimeFetched;
@@ -461,17 +461,17 @@ namespace {
}
HostAndPort BackgroundSync::getSyncTarget() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _syncSourceHost;
}
void BackgroundSync::clearSyncTarget() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_syncSourceHost = HostAndPort();
}
void BackgroundSync::stop() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_pause = true;
_syncSourceHost = HostAndPort();
@@ -485,7 +485,7 @@ namespace {
massert(16235, "going to start syncing, but buffer is not empty", _buffer.empty());
long long updatedLastAppliedHash = _readLastAppliedHash(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_pause = false;
// reset _last fields with current oplog data
@@ -498,14 +498,14 @@ namespace {
}
void BackgroundSync::waitUntilPaused() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while (!_pause) {
_pausedCondition.wait(lock);
}
}
long long BackgroundSync::getLastAppliedHash() const {
- boost::lock_guard<boost::mutex> lck(_mutex);
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
return _lastAppliedHash;
}
@@ -514,13 +514,13 @@ namespace {
}
void BackgroundSync::setLastAppliedHash(long long newHash) {
- boost::lock_guard<boost::mutex> lck(_mutex);
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
_lastAppliedHash = newHash;
}
void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
long long result = _readLastAppliedHash(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_lastAppliedHash = result;
}
@@ -558,17 +558,17 @@ namespace {
}
bool BackgroundSync::getInitialSyncRequestedFlag() {
- boost::lock_guard<boost::mutex> lock(_initialSyncMutex);
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
return _initialSyncRequestedFlag;
}
void BackgroundSync::setInitialSyncRequestedFlag(bool value) {
- boost::lock_guard<boost::mutex> lock(_initialSyncMutex);
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
_initialSyncRequestedFlag = value;
}
void BackgroundSync::pushTestOpToBuffer(const BSONObj& op) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_buffer.push(op);
}
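BackgroundSync::get() above is a lazily constructed singleton that takes s_mutex on every call rather than attempting double-checked locking. Restated standalone (the inShutdown() check is elided):

    #include <mutex>

    class Singleton {
    public:
        static Singleton* get() {
            std::lock_guard<std::mutex> lk(s_mutex);  // every call pays for the
            if (!s_instance) {                        // lock; simple and correct
                s_instance = new Singleton();
            }
            return s_instance;
        }
    private:
        Singleton() = default;
        static std::mutex s_mutex;
        static Singleton* s_instance;
    };

    std::mutex Singleton::s_mutex;
    Singleton* Singleton::s_instance = nullptr;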
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index ad16f4cdc99..ee14d380fe9 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -29,12 +29,12 @@
#pragma once
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
-#include "mongo/util/queue.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/repl/oplogreader.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/db/jsobj.h"
+#include "mongo/stdx/mutex.h"
+#include "mongo/util/queue.h"
namespace mongo {
@@ -133,14 +133,14 @@ namespace repl {
private:
static BackgroundSync *s_instance;
// protects creation of s_instance
- static boost::mutex s_mutex;
+ static stdx::mutex s_mutex;
// Production thread
BlockingQueue<BSONObj> _buffer;
OplogReader _syncSourceReader;
// _mutex protects all of the class variables except _syncSourceReader and _buffer
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
OpTime _lastOpTimeFetched;
@@ -183,7 +183,7 @@ namespace repl {
// bool for indicating resync need on this node and the mutex that protects it
// The resync command sets this flag; the Applier thread observes and clears it.
bool _initialSyncRequestedFlag;
- boost::mutex _initialSyncMutex;
+ stdx::mutex _initialSyncMutex;
// This setting affects the Applier prefetcher behavior.
IndexPrefetchConfig _indexPrefetchConfig;

diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index e8e210cb78c..8432769a99c 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -86,7 +86,7 @@ namespace {
std::unique_ptr<boost::thread> _executorThread;
std::unique_ptr<boost::thread> _quorumCheckThread;
Status _quorumCheckStatus;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
bool _isQuorumCheckDone;
};
@@ -123,13 +123,13 @@ namespace {
}
bool CheckQuorumTest::isQuorumCheckDone() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _isQuorumCheckDone;
}
void CheckQuorumTest::_runQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
_quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_isQuorumCheckDone = true;
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index a2bebd9facb..bc394af7b2f 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -130,7 +130,7 @@ namespace {
const char* ns,
ReplicationCoordinator* replCoord,
const char* opstr) {
- boost::lock_guard<boost::mutex> lk(newOpMutex);
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
Timestamp ts = getNextGlobalTimestamp();
newTimestampNotifier.notify_all();
@@ -870,7 +870,7 @@ namespace {
}
void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime) {
- boost::unique_lock<boost::mutex> lk(newOpMutex);
+ stdx::unique_lock<stdx::mutex> lk(newOpMutex);
while (referenceTime == getLastSetTimestamp()) {
if (!newTimestampNotifier.timed_wait(lk, boost::posix_time::seconds(1)))
@@ -879,7 +879,7 @@ namespace {
}
void setNewTimestamp(const Timestamp& newTime) {
- boost::lock_guard<boost::mutex> lk(newOpMutex);
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
setGlobalTimestamp(newTime);
newTimestampNotifier.notify_all();
}
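Note that oplog.cpp still calls timed_wait(lk, boost::posix_time::seconds(1)) on a boost condition variable; only the mutex and lock types change in this patch. If the condition variable were migrated as well (an assumption, not something this diff does), the std equivalent is wait_for with a chrono duration:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex newOpMutex;                         // illustrative globals mirroring
    std::condition_variable newTimestampNotifier;  // the statics in oplog.cpp

    void waitUpToOneSecond() {
        std::unique_lock<std::mutex> lk(newOpMutex);
        // boost's timed_wait(lk, seconds(1)) returns false on timeout; the std
        // form returns std::cv_status::timeout instead.
        newTimestampNotifier.wait_for(lk, std::chrono::seconds(1));
    }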
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 942681cedf6..f1d2232988f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -85,7 +85,7 @@ namespace {
ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {}
void ReplicationCoordinatorExternalStateImpl::startThreads() {
- boost::lock_guard<boost::mutex> lk(_threadMutex);
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
if (_startedThreads) {
return;
}
@@ -104,7 +104,7 @@ namespace {
}
void ReplicationCoordinatorExternalStateImpl::shutdown() {
- boost::lock_guard<boost::mutex> lk(_threadMutex);
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
if (_startedThreads) {
log() << "Stopping replication applier threads";
_syncSourceFeedback.shutdown();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 62edea65c70..8f5cc27d466 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -69,7 +69,7 @@ namespace repl {
private:
// Guards starting threads and setting _startedThreads
- boost::mutex _threadMutex;
+ stdx::mutex _threadMutex;
// True when the threads have been started
bool _startedThreads;
@@ -89,7 +89,7 @@ namespace repl {
std::unique_ptr<boost::thread> _producerThread;
// Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- boost::mutex _nextThreadIdMutex;
+ stdx::mutex _nextThreadIdMutex;
// Number used to uniquely name threads.
long long _nextThreadId;
};
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index abad48703af..2ab0103f6b1 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -93,7 +93,7 @@ namespace repl {
OperationContext* txn,
const BSONObj& config) {
{
- boost::unique_lock<boost::mutex> lock(_shouldHangConfigMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
while (_storeLocalConfigDocumentShouldHang) {
_shouldHangConfigCondVar.wait(lock);
}
@@ -120,7 +120,7 @@ namespace repl {
OperationContext* txn,
const LastVote& lastVote) {
{
- boost::unique_lock<boost::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
_shouldHangLastVoteCondVar.wait(lock);
}
@@ -156,7 +156,7 @@ namespace repl {
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentToHang(bool hang) {
- boost::unique_lock<boost::mutex> lock(_shouldHangConfigMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
_storeLocalConfigDocumentShouldHang = hang;
if (!hang) {
_shouldHangConfigCondVar.notify_all();
@@ -169,7 +169,7 @@ namespace repl {
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- boost::unique_lock<boost::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
_storeLocalLastVoteDocumentShouldHang = hang;
if (!hang) {
_shouldHangLastVoteCondVar.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index eca57e01c06..c5e2684d5a3 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -131,10 +131,10 @@ namespace repl {
Status _storeLocalConfigDocumentStatus;
Status _storeLocalLastVoteDocumentStatus;
// mutex and cond var for controlling storeLocalConfigDocument()'s hanging
- boost::mutex _shouldHangConfigMutex;
+ stdx::mutex _shouldHangConfigMutex;
boost::condition _shouldHangConfigCondVar;
// mutex and cond var for controlling storeLocalLastVoteDocument()'s hanging
- boost::mutex _shouldHangLastVoteMutex;
+ stdx::mutex _shouldHangLastVoteMutex;
boost::condition _shouldHangLastVoteCondVar;
bool _storeLocalConfigDocumentShouldHang;
bool _storeLocalLastVoteDocumentShouldHang;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 23e15702299..19bfba131f4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -79,7 +79,7 @@ namespace repl {
namespace {
using executor::NetworkInterface;
- void lockAndCall(boost::unique_lock<boost::mutex>* lk, const stdx::function<void ()>& fn) {
+ void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const stdx::function<void ()>& fn) {
if (!lk->owns_lock()) {
lk->lock();
}
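lockAndCall takes the unique_lock by pointer so the caller retains ownership of the lock across the call; the helper only guarantees the mutex is held when fn runs. The same helper restated standalone with std names:

    #include <functional>
    #include <mutex>

    void lockAndCall(std::unique_lock<std::mutex>* lk, const std::function<void()>& fn) {
        if (!lk->owns_lock()) {
            lk->lock();
        }
        fn();
    }

    void example(std::mutex& m) {
        std::unique_lock<std::mutex> lk(m, std::defer_lock);
        lockAndCall(&lk, [] { /* runs with m held */ });
        // lk still owns m here and releases it on scope exit.
    }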
@@ -228,14 +228,14 @@ namespace {
ReplicationCoordinatorImpl::~ReplicationCoordinatorImpl() {}
void ReplicationCoordinatorImpl::waitForStartUpComplete() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
}
}
ReplicaSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _rsConfig;
}
@@ -337,7 +337,7 @@ namespace {
}
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigStartingUp);
const PostMemberStateUpdateAction action =
_setCurrentRSConfig_inlock(localConfig, myIndex.getValue());
@@ -352,7 +352,7 @@ namespace {
void ReplicationCoordinatorImpl::startReplication(OperationContext* txn) {
if (!isReplEnabled()) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
return;
}
@@ -360,7 +360,7 @@ namespace {
{
OID rid = _externalState->ensureMe(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
fassert(18822, !_inShutdown);
_setConfigState_inlock(kConfigStartingUp);
_myRID = rid;
@@ -381,7 +381,7 @@ namespace {
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigUninitialized);
}
@@ -399,7 +399,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
fassert(28533, !_inShutdown);
_inShutdown = true;
if (_rsConfigState == kConfigPreStart) {
@@ -430,7 +430,7 @@ namespace {
}
MemberState ReplicationCoordinatorImpl::getMemberState() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _getMemberState_inlock();
}
@@ -439,7 +439,7 @@ namespace {
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_rsConfig.isInitialized());
uassert(28524,
"Node not a member of the current set configuration",
@@ -530,7 +530,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->setFollowerMode(newState.s);
const PostMemberStateUpdateAction action =
@@ -542,7 +542,7 @@ namespace {
}
bool ReplicationCoordinatorImpl::isWaitingForApplierToDrain() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _isWaitingForDrainToComplete;
}
@@ -569,7 +569,7 @@ namespace {
// external writes will be processed. This is important so that a new temp collection isn't
// introduced on the new primary before we drop all the temp collections.
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_isWaitingForDrainToComplete) {
return;
}
@@ -693,7 +693,7 @@ namespace {
Status ReplicationCoordinatorImpl::setLastOptimeForSlave(const OID& rid,
const Timestamp& ts) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
massert(28576,
"Received an old style replication progress update, which is only used for Master/"
"Slave replication now, but this node is not using Master/Slave replication. "
@@ -731,18 +731,18 @@ namespace {
}
void ReplicationCoordinatorImpl::setMyLastOptime(const OpTime& opTime) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_setMyLastOptime_inlock(&lock, opTime, false);
}
void ReplicationCoordinatorImpl::resetMyLastOptime() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
// Reset to uninitialized OpTime
_setMyLastOptime_inlock(&lock, OpTime(), true);
}
void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(
- boost::unique_lock<boost::mutex>* lock, const OpTime& opTime, bool isRollbackAllowed) {
+ stdx::unique_lock<stdx::mutex>* lock, const OpTime& opTime, bool isRollbackAllowed) {
invariant(lock->owns_lock());
SlaveInfo* mySlaveInfo = &_slaveInfo[_getMyIndexInSlaveInfo_inlock()];
invariant(isRollbackAllowed || mySlaveInfo->opTime <= opTime);
@@ -768,7 +768,7 @@ namespace {
}
OpTime ReplicationCoordinatorImpl::getMyLastOptime() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyLastOptime_inlock();
}
@@ -797,7 +797,7 @@ namespace {
#endif
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while (ts > _getMyLastOptime_inlock()) {
Status interruptedStatus = txn->checkForInterruptNoAssert();
@@ -853,7 +853,7 @@ namespace {
Status ReplicationCoordinatorImpl::setLastOptime_forTest(long long cfgVer,
long long memberId,
const OpTime& opTime) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
const UpdatePositionArgs::UpdateInfo update(OID(), opTime, cfgVer, memberId);
@@ -925,7 +925,7 @@ namespace {
}
void ReplicationCoordinatorImpl::interrupt(unsigned opId) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
it != _replicationWaiterList.end(); ++it) {
WaiterInfo* info = *it;
@@ -949,7 +949,7 @@ namespace {
}
void ReplicationCoordinatorImpl::interruptAll() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
it != _replicationWaiterList.end(); ++it) {
WaiterInfo* info = *it;
@@ -1045,7 +1045,7 @@ namespace {
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _awaitReplication_inlock(&timer, &lock, txn, opTime, writeConcern);
}
@@ -1054,7 +1054,7 @@ namespace {
OperationContext* txn,
const WriteConcernOptions& writeConcern) {
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _awaitReplication_inlock(
&timer,
&lock,
@@ -1065,7 +1065,7 @@ namespace {
ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::_awaitReplication_inlock(
const Timer* timer,
- boost::unique_lock<boost::mutex>* lock,
+ stdx::unique_lock<stdx::mutex>* lock,
OperationContext* txn,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
@@ -1295,7 +1295,7 @@ namespace {
this,
stdx::placeholders::_1));
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
const PostMemberStateUpdateAction action =
_updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
@@ -1353,7 +1353,7 @@ namespace {
bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
if (_settings.usingReplSets()) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (getReplicationMode() == modeReplSet && _getMemberState_inlock().primary()) {
return true;
}
@@ -1445,7 +1445,7 @@ namespace {
// always enforce on local
return false;
}
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (getReplicationMode() != modeReplSet) {
return false;
}
@@ -1463,12 +1463,12 @@ namespace {
}
OID ReplicationCoordinatorImpl::getElectionId() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _electionId;
}
OID ReplicationCoordinatorImpl::getMyRID() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyRID_inlock();
}
@@ -1477,7 +1477,7 @@ namespace {
}
int ReplicationCoordinatorImpl::getMyId() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyId_inlock();
}
@@ -1488,7 +1488,7 @@ namespace {
bool ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand(
BSONObjBuilder* cmdBuilder) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
invariant(_rsConfig.isInitialized());
// do not send updates if we have been removed from the config
if (_selfIndex == -1) {
@@ -1572,7 +1572,7 @@ namespace {
}
void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
BSONArrayBuilder replicationProgress(result->subarrayStart("replicationProgress"));
{
for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin();
@@ -1594,12 +1594,12 @@ namespace {
}
ReplicaSetConfig ReplicationCoordinatorImpl::getConfig() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _rsConfig;
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
result->append("config", _rsConfig.toBSON());
}
@@ -1657,7 +1657,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_getMemberState_inlock().primary()) {
*result = Status(ErrorCodes::NotSecondary, "primaries can't modify maintenance mode");
return;
@@ -1749,7 +1749,7 @@ namespace {
Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs& args,
ReplSetHeartbeatResponse* response) {
{
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
@@ -1808,7 +1808,7 @@ namespace {
log() << "replSetReconfig admin command received from client";
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
@@ -1931,7 +1931,7 @@ namespace {
const ReplicaSetConfig& newConfig,
int myIndex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigReconfiguring);
invariant(_rsConfig.isInitialized());
const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
@@ -1944,7 +1944,7 @@ namespace {
BSONObjBuilder* resultObj) {
log() << "replSetInitiate admin command received from client";
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_settings.usingReplSets()) {
return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
}
@@ -2035,7 +2035,7 @@ namespace {
const ReplicaSetConfig& newConfig,
int myIndex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigInitiating);
invariant(!_rsConfig.isInitialized());
const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
@@ -2120,7 +2120,7 @@ namespace {
_externalState->clearShardingState();
break;
case kActionWinElection: {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_electionId = OID::gen();
_topCoord->processWinElection(_electionId, getNextGlobalTimestamp());
_isWaitingForDrainToComplete = true;
@@ -2138,13 +2138,13 @@ namespace {
}
Status ReplicationCoordinatorImpl::processReplSetGetRBID(BSONObjBuilder* resultObj) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
resultObj->append("rbid", _rbid);
return Status::OK();
}
void ReplicationCoordinatorImpl::incrementRollbackID() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
++_rbid;
}
@@ -2263,7 +2263,7 @@ namespace {
Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(
const UpdatePositionArgs& updates, long long* configVersion) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
Status status = Status::OK();
bool somethingChanged = false;
for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
@@ -2290,7 +2290,7 @@ namespace {
const HandshakeArgs& handshake) {
LOG(2) << "Received handshake " << handshake.toBSON();
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (getReplicationMode() != modeMasterSlave) {
return Status(ErrorCodes::IllegalOperation,
@@ -2313,7 +2313,7 @@ namespace {
}
bool ReplicationCoordinatorImpl::buildsIndexes() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_selfIndex == -1) {
return true;
}
@@ -2323,7 +2323,7 @@ namespace {
std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op) {
std::vector<HostAndPort> hosts;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (size_t i = 0; i < _slaveInfo.size(); ++i) {
const SlaveInfo& slaveInfo = _slaveInfo[i];
if (slaveInfo.opTime < op) {
@@ -2340,7 +2340,7 @@ namespace {
}
std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
@@ -2359,7 +2359,7 @@ namespace {
Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
const WriteConcernOptions& writeConcern) const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
@@ -2384,7 +2384,7 @@ namespace {
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
@@ -2489,7 +2489,7 @@ namespace {
else {
lastOpTime = lastOpTimeStatus.getValue();
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_setMyLastOptime_inlock(&lk, lastOpTime, true);
_externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
}
@@ -2551,7 +2551,7 @@ namespace {
}
OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
return _lastCommittedOpTime;
}
@@ -2602,7 +2602,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->processReplSetRequestVotes(args, response, getMyLastOptime());
*result = Status::OK();
}
@@ -2659,7 +2659,7 @@ namespace {
Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) {
{
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index f49e2dde89e..2a5f11b410a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -29,14 +29,12 @@
#pragma once
#include <boost/thread.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <vector>
#include <memory>
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/repl/data_replicator.h"
#include "mongo/db/repl/member_state.h"
#include "mongo/db/repl/optime.h"
@@ -46,9 +44,11 @@
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/update_position_args.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/platform/unordered_set.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -499,7 +499,7 @@ namespace repl {
*/
ReplicationCoordinator::StatusAndDuration _awaitReplication_inlock(
const Timer* timer,
- boost::unique_lock<boost::mutex>* lock,
+ stdx::unique_lock<stdx::mutex>* lock,
OperationContext* txn,
const OpTime& opTime,
const WriteConcernOptions& writeConcern);
@@ -583,7 +583,7 @@ namespace repl {
* This function has the same rules for "opTime" as setMyLastOptime(), unless
* "isRollbackAllowed" is true.
*/
- void _setMyLastOptime_inlock(boost::unique_lock<boost::mutex>* lock,
+ void _setMyLastOptime_inlock(stdx::unique_lock<stdx::mutex>* lock,
const OpTime& opTime,
bool isRollbackAllowed);
@@ -910,7 +910,7 @@ namespace repl {
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable boost::mutex _mutex; // (S)
+ mutable stdx::mutex _mutex; // (S)
// Handles to actively queued heartbeats.
HeartbeatHandles _heartbeatHandles; // (X)
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index ae9282e001e..35f5fdf9f9d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -88,7 +88,7 @@ namespace {
invariant(!_freshnessChecker);
invariant(!_electCmdRunner);
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
switch (_rsConfigState) {
case kConfigSteady:
break;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 490ed4af6cf..686f7bbe5d3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -91,7 +91,7 @@ namespace {
invariant(!_voteRequester);
invariant(!_freshnessChecker);
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
switch (_rsConfigState) {
case kConfigSteady:
break;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 63af8b2bc03..4afdb8594e8 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -170,7 +170,7 @@ namespace {
targetIndex >= 0 &&
hbStatusResponse.getValue().hasState() &&
hbStatusResponse.getValue().getState() != MemberState::RS_PRIMARY) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (hbStatusResponse.getValue().getConfigVersion() == _rsConfig.getConfigVersion()) {
_updateOpTimeFromHeartbeat_inlock(targetIndex,
hbStatusResponse.getValue().getOpTime());
@@ -209,7 +209,7 @@ namespace {
case HeartbeatResponseAction::NoAction:
// Update the cached member state if different than the current topology member state
if (_memberState != _topCoord->getMemberState()) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
const PostMemberStateUpdateAction postUpdateAction =
_updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
@@ -295,7 +295,7 @@ namespace {
invariant(cbData.txn);
// TODO Add invariant that we've got global shared or global exclusive lock, when supported
// by lock manager.
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->stepDownIfPending();
const PostMemberStateUpdateAction action =
_updateMemberStateFromTopologyCoordinator_inlock();
@@ -304,7 +304,7 @@ namespace {
}
void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -360,7 +360,7 @@ namespace {
return;
}
fassert(18911, cbData.status);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -382,7 +382,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex, boost::defer_lock_t());
+ stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::defer_lock);
const StatusWith<int> myIndex = validateConfigForHeartbeatReconfig(
_externalState.get(),
@@ -459,7 +459,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigHBReconfiguring);
invariant(!_rsConfig.isInitialized() ||
_rsConfig.getConfigVersion() < newConfig.getConfigVersion());
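The defer_lock hunk above constructs the lock unlocked so that config validation can run without holding _mutex, then locks explicitly afterwards. The idiom in isolation:

    #include <mutex>

    void deferExample(std::mutex& m) {
        std::unique_lock<std::mutex> lk(m, std::defer_lock);  // not locked yet
        // ... work that must not hold the mutex ...
        lk.lock();                                            // now locked
        // ... critical section ...
    }   // unlocked on destruction, as usual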
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 9866d08a451..d7c4103f9f5 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -74,7 +74,7 @@ namespace {
ReplicationExecutor::~ReplicationExecutor() {}
std::string ReplicationExecutor::getDiagnosticString() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _getDiagnosticString_inlock();
}
@@ -105,7 +105,7 @@ namespace {
std::pair<WorkItem, CallbackHandle> work;
while ((work = getWork()).first.callback.isValid()) {
{
- boost::lock_guard<boost::mutex> lk(_terribleExLockSyncMutex);
+ stdx::lock_guard<stdx::mutex> lk(_terribleExLockSyncMutex);
const Callback* callback = _getCallbackFromHandle(work.first.callback);
const Status inStatus = callback->_isCanceled ?
Status(ErrorCodes::CallbackCanceled, "Callback canceled") :
@@ -125,7 +125,7 @@ namespace {
// * drain all of the unsignaled events, sleepers, and ready queue, by running those
// callbacks with a "shutdown" or "canceled" status.
// * Signal all threads blocked in waitForEvent, and wait for them to return from that method.
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
_readyQueue.splice(_readyQueue.end(), _dbWorkInProgressQueue);
@@ -145,7 +145,7 @@ namespace {
_dblockExclusiveLockTaskRunner.cancel();
_dblockTaskRunner.cancel();
_dblockWorkers.join();
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_inShutdown);
invariant(_dbWorkInProgressQueue.empty());
invariant(_exclusiveLockInProgressQueue.empty());
@@ -174,7 +174,7 @@ namespace {
}
StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return makeEvent_inlock();
}
@@ -189,7 +189,7 @@ namespace {
}
void ReplicationExecutor::signalEvent(const EventHandle& eventHandle) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
signalEvent_inlock(eventHandle);
}
@@ -214,7 +214,7 @@ namespace {
StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::onEvent(
const EventHandle& eventHandle,
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
WorkQueue* queue = &_readyQueue;
Event* event = _getEventFromHandle(eventHandle);
if (!event->_isSignaled) {
@@ -268,7 +268,7 @@ namespace {
Callback* callback = _getCallbackFromHandle(cbHandle);
const WorkQueue::iterator iter = callback->_iter;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -299,7 +299,7 @@ namespace {
else {
scheduledRequest.expirationDate = _networkInterface->now() + scheduledRequest.timeout;
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(
&_networkInProgressQueue,
stdx::bind(remoteCommandFailedEarly,
@@ -327,7 +327,7 @@ namespace {
StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWork(
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_networkInterface->signalWorkAvailable();
return enqueueWork_inlock(&_readyQueue, work);
}
@@ -336,7 +336,7 @@ namespace {
Date_t when,
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
WorkQueue temp;
StatusWith<CallbackHandle> cbHandle = enqueueWork_inlock(&temp, work);
if (!cbHandle.isOK())
@@ -359,7 +359,7 @@ namespace {
const NamespaceString& nss,
LockMode mode) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_dbWorkInProgressQueue,
work);
if (handle.isOK()) {
@@ -389,8 +389,8 @@ namespace {
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
- boost::mutex* terribleExLockSyncMutex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::mutex* terribleExLockSyncMutex) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_inShutdown)
return;
Callback* callback = _getCallbackFromHandle(cbHandle);
@@ -399,9 +399,9 @@ namespace {
_freeQueue.splice(_freeQueue.begin(), *workQueue, iter);
lk.unlock();
{
- std::unique_ptr<boost::lock_guard<boost::mutex> > terribleLock(
+ std::unique_ptr<stdx::lock_guard<stdx::mutex> > terribleLock(
terribleExLockSyncMutex ?
- new boost::lock_guard<boost::mutex>(*terribleExLockSyncMutex) :
+ new stdx::lock_guard<stdx::mutex>(*terribleExLockSyncMutex) :
nullptr);
// Only possible task runner error status is CallbackCanceled.
callback->_callbackFn(CallbackArgs(this,
@@ -420,7 +420,7 @@ namespace {
ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_exclusiveLockInProgressQueue,
work);
if (handle.isOK()) {
@@ -444,7 +444,7 @@ namespace {
std::pair<ReplicationExecutor::WorkItem, ReplicationExecutor::CallbackHandle>
ReplicationExecutor::getWork() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (true) {
const Date_t now = _networkInterface->now();
Date_t nextWakeupDate = scheduleReadySleepers_inlock(now);
@@ -539,7 +539,7 @@ namespace {
}
void ReplicationExecutor::Event::waitUntilSignaled() {
- boost::unique_lock<boost::mutex> lk(_executor->_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
++_executor->_totalEventWaiters;
while (!_isSignaled) {
_isSignaledCondition.wait(lk);
@@ -549,7 +549,7 @@ namespace {
}
bool ReplicationExecutor::Event::isSignaled() {
- boost::lock_guard<boost::mutex> lk(_executor->_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_executor->_mutex);
return _isSignaled;
}
@@ -567,7 +567,7 @@ namespace {
ReplicationExecutor::Callback::~Callback() {}
void ReplicationExecutor::Callback::cancel() {
- boost::unique_lock<boost::mutex> lk(_executor->_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
_isCanceled = true;
if (_iter->isNetworkOperation) {
lk.unlock();
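The terribleLock above heap-allocates a lock_guard inside a unique_ptr purely so that taking the lock can be conditional; lock_guard is neither movable nor optional. A unique_lock can express conditional locking without the allocation. This is a sketch of an alternative, not what the patch does:

    #include <mutex>

    void maybeLocked(std::mutex* maybeMutex) {
        std::unique_lock<std::mutex> lk;   // owns no mutex yet
        if (maybeMutex) {
            lk = std::unique_lock<std::mutex>(*maybeMutex);
        }
        // ... section that is locked only when a mutex was supplied ...
    }   // unlocks only if a mutex was taken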
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index 567cf46f67a..3e65beb0765 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -29,7 +29,6 @@
#pragma once
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <string>
#include <thread>
@@ -45,6 +44,7 @@
#include "mongo/platform/random.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/list.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -285,7 +285,7 @@ namespace repl {
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
- boost::mutex* terribleExLockSyncMutex);
+ stdx::mutex* terribleExLockSyncMutex);
/**
* Wrapper around TaskExecutor::getCallbackFromHandle that returns an Event* instead of
@@ -304,8 +304,8 @@ namespace repl {
std::unique_ptr<executor::NetworkInterface> _networkInterface;
std::unique_ptr<StorageInterface> _storageInterface;
- boost::mutex _mutex;
- boost::mutex _terribleExLockSyncMutex;
+ stdx::mutex _mutex;
+ stdx::mutex _terribleExLockSyncMutex;
boost::condition_variable _noMoreWaitingThreads;
WorkQueue _freeQueue;
WorkQueue _readyQueue;
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index e72828d3758..c3d7bd79903 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -62,7 +62,7 @@ namespace repl {
}
void Reporter::cancel() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (!_active) {
return;
@@ -76,7 +76,7 @@ namespace repl {
}
void Reporter::wait() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_active) {
return;
}
@@ -86,7 +86,7 @@ namespace repl {
}
Status Reporter::trigger() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _schedule_inlock();
}
@@ -124,7 +124,7 @@ namespace repl {
}
void Reporter::_callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_status = rcbd.response.getStatus();
_active = false;
@@ -139,17 +139,17 @@ namespace repl {
}
Status Reporter::getStatus() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _status;
}
bool Reporter::isActive() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _active;
}
bool Reporter::willRunAgain() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _willRunAgain;
}
} // namespace repl
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index 4dbf86c4ed5..22d2bb1fa1a 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -105,7 +105,7 @@ namespace repl {
HostAndPort _target;
// Protects member data of this Reporter.
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
// Stores the most recent Status returned from the ReplicationExecutor.
Status _status;
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 960fd92ac6c..49c70c3c2b7 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -99,7 +99,7 @@ namespace repl {
}
void SyncSourceFeedback::forwardSlaveProgress() {
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
_cond.notify_all();
}
@@ -112,7 +112,7 @@ namespace repl {
}
BSONObjBuilder cmd;
{
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
// the command could not be created, likely because the node was removed from the set
if (!replCoord->prepareReplSetUpdatePositionCommand(&cmd)) {
return Status::OK();
@@ -150,7 +150,7 @@ namespace repl {
}
void SyncSourceFeedback::shutdown() {
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_shutdownSignaled = true;
_cond.notify_all();
}
@@ -161,7 +161,7 @@ namespace repl {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
while (true) { // breaks once _shutdownSignaled is true
{
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
while (!_positionChanged && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -188,13 +188,13 @@ namespace repl {
// fix connection if need be
if (target.empty()) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
if (!_connect(txn.get(), target)) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
@@ -202,7 +202,7 @@ namespace repl {
Status status = updateUpstream(txn.get());
if (!status.isOK()) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
}
}
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index 40d22bddb5e..900019cd3e8 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -29,11 +29,11 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include "mongo/client/constants.h"
#include "mongo/client/dbclientcursor.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -83,7 +83,7 @@ namespace repl {
// our connection to our sync target
std::unique_ptr<DBClientConnection> _connection;
// protects cond, _shutdownSignaled, and _positionChanged.
- boost::mutex _mtx;
+ stdx::mutex _mtx;
// used to alert our thread of changes which need to be passed up the chain
boost::condition _cond;
// used to indicate a position change which has not yet been pushed along
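sync_source_feedback keeps boost::condition _cond while _mtx becomes a stdx::mutex. This compiles because boost::condition, in the Boost.Thread versions of this era, is the condition_variable_any flavor, which waits on any lock type rather than being tied to boost::unique_lock<boost::mutex>. The std analogue of the mixture:

    #include <condition_variable>
    #include <mutex>

    std::mutex mtx;                       // stands in for stdx::mutex _mtx
    std::condition_variable_any cond;     // the _any flavor accepts any lock type
    bool positionChanged = false;

    void waitForChange() {
        std::unique_lock<std::mutex> lock(mtx);
        while (!positionChanged) {
            cond.wait(lock);   // works even though cond is not tied to std::mutex
        }
    }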
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index f1b54c295d0..421fa9c9fb5 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -85,7 +85,7 @@ namespace {
TaskRunner::~TaskRunner() {
try {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_active) {
return;
}
@@ -101,7 +101,7 @@ namespace {
}
std::string TaskRunner::getDiagnosticString() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
str::stream output;
output << "TaskRunner";
output << " scheduled tasks: " << _tasks.size();
@@ -111,14 +111,14 @@ namespace {
}
bool TaskRunner::isActive() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _active;
}
void TaskRunner::schedule(const Task& task) {
invariant(task);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_tasks.push_back(task);
_condition.notify_all();
@@ -134,7 +134,7 @@ namespace {
}
void TaskRunner::cancel() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_cancelRequested = true;
_condition.notify_all();
}
@@ -159,7 +159,7 @@ namespace {
// Release thread back to pool after disposing if no scheduled tasks in queue.
if (nextAction == NextAction::kDisposeOperationContext ||
nextAction == NextAction::kInvalid) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_tasks.empty()) {
_finishRunTasks_inlock();
return;
@@ -170,7 +170,7 @@ namespace {
std::list<Task> tasks;
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
tasks.swap(_tasks);
}
@@ -180,7 +180,7 @@ namespace {
"this task has been canceled by a previously invoked task"));
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_finishRunTasks_inlock();
}
@@ -191,7 +191,7 @@ namespace {
}
TaskRunner::Task TaskRunner::_waitForNextTask() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_tasks.empty() && !_cancelRequested) {
_condition.wait(lk);
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index fb7985df7ca..3aa875205d1 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -29,11 +29,11 @@
#pragma once
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
#include <list>
#include "mongo/base/disallow_copying.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -152,7 +152,7 @@ namespace repl {
CreateOperationContextFn _createOperationContext;
// Protects member data of this TaskRunner.
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
boost::condition _condition;
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 53b2857bd86..feb28ceadb6 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -110,7 +110,7 @@ namespace mongo {
}
ServiceContext::~ServiceContext() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_clients.empty());
}
@@ -136,7 +136,7 @@ namespace mongo {
throw;
}
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_clients.insert(client.get()).second);
}
return UniqueClient(client.release());
@@ -145,7 +145,7 @@ namespace mongo {
void ServiceContext::ClientDeleter::operator()(Client* client) const {
ServiceContext* const service = client->getServiceContext();
{
- boost::lock_guard<boost::mutex> lk(service->_mutex);
+ stdx::lock_guard<stdx::mutex> lk(service->_mutex);
invariant(service->_clients.erase(client));
}
try {
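ClientDeleter is the custom deleter behind UniqueClient: it unregisters the Client from the ServiceContext's client set under _mutex before destroying it. A self-contained sketch of the pattern (Registry and Client are illustrative stand-ins, not the real types):

    #include <memory>
    #include <mutex>
    #include <set>

    struct Client;

    struct Registry {
        std::mutex mutex;
        std::set<Client*> clients;
    };

    struct Client {
        Registry* registry;
    };

    struct ClientDeleter {
        void operator()(Client* client) const {
            Registry* r = client->registry;
            {
                std::lock_guard<std::mutex> lk(r->mutex);
                r->clients.erase(client);   // unregister before freeing
            }
            delete client;
        }
    };

    using UniqueClient = std::unique_ptr<Client, ClientDeleter>;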
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 3153558f63b..accd9232947 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -157,7 +157,7 @@ namespace mongo {
Client* next();
private:
- boost::unique_lock<boost::mutex> _lock;
+ stdx::unique_lock<stdx::mutex> _lock;
ClientSet::const_iterator _curr;
ClientSet::const_iterator _end;
};
@@ -310,7 +310,7 @@ namespace mongo {
* Mutex used to synchronize access to mutable state of this ServiceContext instance,
* including possibly by its subclasses.
*/
- boost::mutex _mutex;
+ stdx::mutex _mutex;
private:
/**
diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp
index 7b1350af9c0..c8d583d403e 100644
--- a/src/mongo/db/stats/lock_server_status_section.cpp
+++ b/src/mongo/db/stats/lock_server_status_section.cpp
@@ -60,7 +60,7 @@ namespace {
invariant(client);
++numTotal;
- boost::unique_lock<Client> uniqueLock(*client);
+ stdx::unique_lock<Client> uniqueLock(*client);
const OperationContext* opCtx = client->getOperationContext();
if (opCtx == NULL) continue;
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index d6ab900420f..80b5a66c98f 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -80,7 +80,7 @@ namespace mongo {
{}
const SnapshotData* Snapshots::takeSnapshot() {
- boost::lock_guard<boost::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
_loc = ( _loc + 1 ) % kNumSnapshots;
_snapshots[_loc].takeSnapshot();
if ( _stored < kNumSnapshots )
@@ -89,7 +89,7 @@ namespace mongo {
}
StatusWith<SnapshotDiff> Snapshots::computeDelta() {
- boost::lock_guard<boost::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
// We need 2 snapshots to calculate a delta
if (_stored < 2) {
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine.cpp b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
index 6c57429215d..395d002b26a 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
@@ -53,7 +53,7 @@ namespace mongo {
StringData ns,
StringData ident,
const CollectionOptions& options) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (options.capped) {
return new InMemoryRecordStore(ns,
&_dataMap[ident],
@@ -77,13 +77,13 @@ namespace mongo {
SortedDataInterface* InMemoryEngine::getSortedDataInterface(OperationContext* opCtx,
StringData ident,
const IndexDescriptor* desc) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return getInMemoryBtreeImpl(Ordering::make(desc->keyPattern()), &_dataMap[ident]);
}
Status InMemoryEngine::dropIdent(OperationContext* opCtx,
StringData ident) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_dataMap.erase(ident);
return Status::OK();
}
@@ -96,7 +96,7 @@ namespace mongo {
std::vector<std::string> InMemoryEngine::getAllIdents( OperationContext* opCtx ) const {
std::vector<std::string> all;
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for ( DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it ) {
all.push_back( it->first );
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine.h b/src/mongo/db/storage/in_memory/in_memory_engine.h
index 687104254d3..c7e527ec2f7 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine.h
+++ b/src/mongo/db/storage/in_memory/in_memory_engine.h
@@ -30,9 +30,8 @@
#pragma once
-#include <boost/thread/mutex.hpp>
-
#include "mongo/db/storage/kv/kv_engine.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -89,7 +88,7 @@ namespace mongo {
private:
typedef StringMap<std::shared_ptr<void> > DataMap;
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
DataMap _dataMap; // All actual data is owned in here
};
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 6797cdf5bff..b24cc705226 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -64,7 +64,7 @@ namespace {
virtual void commit() {}
virtual void rollback() {
- boost::lock_guard<boost::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
_catalog->_idents.erase(_ident);
}
@@ -80,7 +80,7 @@ namespace {
virtual void commit() {}
virtual void rollback() {
- boost::lock_guard<boost::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
_catalog->_idents[_ident] = _entry;
}
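
Both rollback handlers re-acquire _identsLock before undoing the in-memory mutation, since rollback can run long after the forward path released the lock. A rough sketch of the idea, against a hypothetical minimal Change interface (not the real recovery-unit API):

    #include <map>
    #include <mutex>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for the recovery-unit Change protocol.
    struct Change {
        virtual ~Change() {}
        virtual void commit() = 0;
        virtual void rollback() = 0;
    };

    std::mutex identsLock;
    std::map<std::string, std::string> idents;

    struct AddIdentChange : Change {
        explicit AddIdentChange(std::string ident) : _ident(std::move(ident)) {}
        virtual void commit() {}  // forward path already updated the map
        virtual void rollback() {
            // Rollback may fire much later, on another code path, so it
            // must take the same mutex the forward path used.
            std::lock_guard<std::mutex> lk(identsLock);
            idents.erase(_ident);
        }
        std::string _ident;
    };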
@@ -149,7 +149,7 @@ namespace {
}
void KVCatalog::getAllCollections( std::vector<std::string>* out ) const {
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
out->push_back( it->first );
}
@@ -170,7 +170,7 @@ namespace {
const string ident = _newUniqueIdent(ns, "collection");
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
Entry& old = _idents[ns.toString()];
if ( !old.ident.empty() ) {
return Status( ErrorCodes::NamespaceExists, "collection already exists" );
@@ -200,7 +200,7 @@ namespace {
}
std::string KVCatalog::getCollectionIdent( StringData ns ) const {
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
invariant( it != _idents.end() );
return it->second.ident;
@@ -227,7 +227,7 @@ namespace {
RecordId dl;
{
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
invariant( it != _idents.end() );
dl = it->second.storedLoc;
@@ -353,7 +353,7 @@ namespace {
invariant( status.getValue() == loc );
}
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
invariant(fromIt != _idents.end());
@@ -377,7 +377,7 @@ namespace {
MODE_X));
}
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
const NSToIdentMap::iterator it = _idents.find(ns.toString());
if (it == _idents.end()) {
return Status( ErrorCodes::NamespaceNotFound, "collection not found" );
@@ -396,7 +396,7 @@ namespace {
std::vector<std::string> v;
{
- boost::lock_guard<boost::mutex> lk( _identsLock );
+ stdx::lock_guard<stdx::mutex> lk( _identsLock );
for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
NamespaceString ns( it->first );
if ( ns.db() != db )
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index cbdbfc515bc..d253b9a1828 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -33,12 +33,11 @@
#include <map>
#include <string>
-#include <boost/thread/mutex.hpp>
-
#include "mongo/base/string_data.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/record_id.h"
#include "mongo/db/storage/bson_collection_catalog_entry.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -128,7 +127,7 @@ namespace mongo {
};
typedef std::map<std::string,Entry> NSToIdentMap;
NSToIdentMap _idents;
- mutable boost::mutex _identsLock;
+ mutable stdx::mutex _identsLock;
};
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 27e2e567ec2..15a17987ff7 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -61,7 +61,7 @@ namespace mongo {
}
virtual void rollback() {
- boost::lock_guard<boost::mutex> lk(_engine->_dbsLock);
+ stdx::lock_guard<stdx::mutex> lk(_engine->_dbsLock);
_engine->_dbs[_db] = _entry;
}
@@ -196,7 +196,7 @@ namespace mongo {
}
void KVStorageEngine::listDatabases( std::vector<std::string>* out ) const {
- boost::lock_guard<boost::mutex> lk( _dbsLock );
+ stdx::lock_guard<stdx::mutex> lk( _dbsLock );
for ( DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it ) {
if ( it->second->isEmpty() )
continue;
@@ -206,7 +206,7 @@ namespace mongo {
DatabaseCatalogEntry* KVStorageEngine::getDatabaseCatalogEntry( OperationContext* opCtx,
StringData dbName ) {
- boost::lock_guard<boost::mutex> lk( _dbsLock );
+ stdx::lock_guard<stdx::mutex> lk( _dbsLock );
KVDatabaseCatalogEntry*& db = _dbs[dbName.toString()];
if ( !db ) {
// Not registering change since db creation is implicit and never rolled back.
@@ -224,7 +224,7 @@ namespace mongo {
KVDatabaseCatalogEntry* entry;
{
- boost::lock_guard<boost::mutex> lk( _dbsLock );
+ stdx::lock_guard<stdx::mutex> lk( _dbsLock );
DBMap::const_iterator it = _dbs.find( db.toString() );
if ( it == _dbs.end() )
return Status( ErrorCodes::NamespaceNotFound, "db not found to drop" );
@@ -250,7 +250,7 @@ namespace mongo {
invariant( toDrop.empty() );
{
- boost::lock_guard<boost::mutex> lk( _dbsLock );
+ stdx::lock_guard<stdx::mutex> lk( _dbsLock );
txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
_dbs.erase( db.toString() );
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index f8106959dc3..3159180d193 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -33,11 +33,10 @@
#include <map>
#include <string>
-#include <boost/thread/mutex.hpp>
-
#include "mongo/db/storage/kv/kv_catalog.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/storage_engine.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -111,7 +110,7 @@ namespace mongo {
typedef std::map<std::string,KVDatabaseCatalogEntry*> DBMap;
DBMap _dbs;
- mutable boost::mutex _dbsLock;
+ mutable stdx::mutex _dbsLock;
};
}
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 4b33d7a2bf9..e5ba4060a84 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -74,7 +74,6 @@
#include "mongo/db/storage/mmap_v1/dur.h"
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/thread.hpp>
#include <iomanip>
#include <utility>
@@ -84,14 +83,15 @@
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
-#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/db/storage/mmap_v1/dur_commitjob.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_journal_writer.h"
#include "mongo/db/storage/mmap_v1/dur_recover.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
+#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage_options.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/synchronization.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
@@ -113,7 +113,7 @@ namespace dur {
namespace {
// Used to activate the flush thread
- boost::mutex flushMutex;
+ stdx::mutex flushMutex;
boost::condition_variable flushRequested;
// This is waited on for getlasterror acknowledgements. It means that data has been written to
@@ -697,7 +697,7 @@ namespace {
}
try {
- boost::unique_lock<boost::mutex> lock(flushMutex);
+ stdx::unique_lock<stdx::mutex> lock(flushMutex);
for (unsigned i = 0; i <= 2; i++) {
if (boost::cv_status::no_timeout == flushRequested.wait_for(
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index 1a08dc3c01c..8eeb229d5bc 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -162,7 +162,7 @@ namespace mongo {
SecureRandom* mySecureRandom = NULL;
mongo::mutex mySecureRandomMutex;
int64_t getMySecureRandomNumber() {
- boost::lock_guard<boost::mutex> lk( mySecureRandomMutex );
+ stdx::lock_guard<stdx::mutex> lk( mySecureRandomMutex );
if ( ! mySecureRandom )
mySecureRandom = SecureRandom::create();
return mySecureRandom->nextInt64();
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index b79dba66cfd..555b5f8e558 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -136,7 +136,7 @@ namespace mongo {
(although not assured) that it is journaled here once.
*/
static void prepBasicWrites(AlignedBuilder& bb, const std::vector<WriteIntent>& intents) {
- boost::lock_guard<boost::mutex> lk(privateViews._mutex());
+ stdx::lock_guard<stdx::mutex> lk(privateViews._mutex());
// Each time write intents switch to a different database we journal a JDbContext.
// Switches will be rare as we sort by memory location first and we batch commit.
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index edef79fc92b..bfd023affab 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -267,7 +267,7 @@ namespace mongo {
}
void RecoveryJob::close() {
- boost::lock_guard<boost::mutex> lk(_mx);
+ stdx::lock_guard<stdx::mutex> lk(_mx);
_close();
}
@@ -387,7 +387,7 @@ namespace mongo {
void RecoveryJob::processSection(const JSectHeader *h, const void *p, unsigned len, const JSectFooter *f) {
LockMongoFilesShared lkFiles; // for RecoveryJob::Last
- boost::lock_guard<boost::mutex> lk(_mx);
+ stdx::lock_guard<stdx::mutex> lk(_mx);
// Check the footer checksum before doing anything else.
if (_recovering) {
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 197cb45e844..e78c29281c7 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -85,7 +85,7 @@ namespace mongo {
/** de-register view. threadsafe */
void PointerToDurableMappedFile::remove(void *view, size_t len) {
if( view ) {
- boost::lock_guard<boost::mutex> lk(_m);
+ stdx::lock_guard<stdx::mutex> lk(_m);
clearWritableBits_inlock(view, len);
_views.erase(view);
}
@@ -93,7 +93,7 @@ namespace mongo {
#ifdef _WIN32
void PointerToDurableMappedFile::clearWritableBits(void *privateView, size_t len) {
- boost::lock_guard<boost::mutex> lk(_m);
+ stdx::lock_guard<stdx::mutex> lk(_m);
clearWritableBits_inlock(privateView, len);
}
@@ -110,7 +110,7 @@ namespace mongo {
extern mutex mapViewMutex;
__declspec(noinline) void PointerToDurableMappedFile::makeChunkWritable(size_t chunkno) {
- boost::lock_guard<boost::mutex> lkPrivateViews(_m);
+ stdx::lock_guard<stdx::mutex> lkPrivateViews(_m);
if (writable.get(chunkno)) // double check lock
return;
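
makeChunkWritable re-tests writable.get(chunkno) after acquiring _m, i.e. double-checked locking. The real code leans on its custom COW bitset for the unsynchronized fast path; the sketch below substitutes a std::atomic flag so the fast path is race-free under the standard memory model (names hypothetical):

    #include <atomic>
    #include <mutex>

    std::mutex m;                          // serializes the slow path
    std::atomic<bool> initialized(false);  // race-free fast-path flag

    void ensureInitialized() {
        if (initialized.load(std::memory_order_acquire))
            return;                        // lock-free fast path
        std::lock_guard<std::mutex> lk(m);
        if (initialized.load(std::memory_order_relaxed))
            return;                        // lost the race; already done
        // ... expensive one-time work (in the diff: remapping a chunk) ...
        initialized.store(true, std::memory_order_release);
    }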
@@ -120,7 +120,7 @@ namespace mongo {
size_t chunkStart = chunkno * MemoryMappedCOWBitset::ChunkSize;
size_t chunkNext = chunkStart + MemoryMappedCOWBitset::ChunkSize;
- boost::lock_guard<boost::mutex> lkMapView(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lkMapView(mapViewMutex);
map<void*, DurableMappedFile*>::iterator i = _views.upper_bound((void*)(chunkNext - 1));
while (1) {
@@ -225,7 +225,7 @@ namespace mongo {
@return the DurableMappedFile to which this pointer belongs. null if not found.
*/
DurableMappedFile* PointerToDurableMappedFile::find(void *p, /*out*/ size_t& ofs) {
- boost::lock_guard<boost::mutex> lk(_m);
+ stdx::lock_guard<stdx::mutex> lk(_m);
return find_inlock(p, ofs);
}
@@ -267,7 +267,7 @@ namespace mongo {
LOG(3) << "mmf finishOpening " << (void*) _view_write << ' ' << filename() << " len:" << length();
if( _view_write ) {
if (storageGlobalParams.dur) {
- boost::lock_guard<boost::mutex> lk2(privateViews._mutex());
+ stdx::lock_guard<stdx::mutex> lk2(privateViews._mutex());
_view_private = createPrivateMap();
if( _view_private == 0 ) {
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 17ea4b328b2..6fb7f61e7e3 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -125,7 +125,7 @@ namespace mongo {
}
void FileAllocator::requestAllocation( const string &name, long &size ) {
- boost::lock_guard<boost::mutex> lk( _pendingMutex );
+ stdx::lock_guard<stdx::mutex> lk( _pendingMutex );
if ( _failed )
return;
long oldSize = prevSize( name );
@@ -139,7 +139,7 @@ namespace mongo {
}
void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
- boost::unique_lock<boost::mutex> lk( _pendingMutex );
+ stdx::unique_lock<stdx::mutex> lk( _pendingMutex );
// In case the allocator is in a failed state, check once before starting so that subsequent
// requests for the same database would fail fast after the first one has failed.
@@ -172,7 +172,7 @@ namespace mongo {
void FileAllocator::waitUntilFinished() const {
if ( _failed )
return;
- boost::unique_lock<boost::mutex> lk( _pendingMutex );
+ stdx::unique_lock<stdx::mutex> lk( _pendingMutex );
while( _pending.size() != 0 )
_pendingUpdated.wait(lk);
}
@@ -359,7 +359,7 @@ namespace mongo {
}
while( 1 ) {
{
- boost::unique_lock<boost::mutex> lk( fa->_pendingMutex );
+ stdx::unique_lock<stdx::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
fa->_pendingUpdated.wait(lk);
}
@@ -367,7 +367,7 @@ namespace mongo {
string name;
long size = 0;
{
- boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
+ stdx::lock_guard<stdx::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
break;
name = fa->_pending.front();
@@ -439,20 +439,20 @@ namespace mongo {
}
{
- boost::lock_guard<boost::mutex> lk(fa->_pendingMutex);
+ stdx::lock_guard<stdx::mutex> lk(fa->_pendingMutex);
fa->_failed = true;
// TODO: Should we remove the file from pending?
fa->_pendingUpdated.notify_all();
}
-
-
+
+
sleepsecs(10);
continue;
}
{
- boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
+ stdx::lock_guard<stdx::mutex> lk( fa->_pendingMutex );
fa->_pendingSize.erase( name );
fa->_pending.pop_front();
fa->_pendingUpdated.notify_all();
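
The allocator thread and its callers coordinate through _pendingMutex and the _pendingUpdated condition variable, a plain producer/consumer queue. A compact sketch of that shape, with hypothetical names:

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <string>

    std::mutex pendingMutex;
    std::condition_variable pendingUpdated;
    std::deque<std::string> pending;

    void requestAllocation(const std::string& name) {
        std::lock_guard<std::mutex> lk(pendingMutex);
        pending.push_back(name);
        pendingUpdated.notify_all();  // wake the allocator thread
    }

    void waitUntilFinished() {
        std::unique_lock<std::mutex> lk(pendingMutex);
        while (!pending.empty())      // predicate loop, as in the diff
            pendingUpdated.wait(lk);
    }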
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index 7b34b21b254..f70b64c96eb 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -241,7 +241,7 @@ namespace mongo {
// It ensures close() cannot complete while flush() is running
// Lock Ordering:
// LockMongoFilesShared must be taken before _flushMutex if both are taken
- boost::mutex _flushMutex;
+ stdx::mutex _flushMutex;
#endif
protected:
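
The comment above codifies a fixed lock-ordering rule, the usual discipline for avoiding deadlock when two mutexes can be held at once. An illustrative sketch with hypothetical locks:

    #include <mutex>

    std::mutex filesMutex;  // stand-in for LockMongoFilesShared
    std::mutex flushMutex;  // rule: filesMutex is always taken first

    void flush() {
        std::lock_guard<std::mutex> files(filesMutex);
        std::lock_guard<std::mutex> fl(flushMutex);
        // ... work while both are held; every other path that needs both
        // must acquire them in this same order (or use std::lock, which
        // acquires a set of mutexes deadlock-free) ...
    }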
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index 595df3616f3..b4550f135db 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -253,7 +253,7 @@ namespace {
DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry( OperationContext* opCtx,
StringData db ) {
{
- boost::lock_guard<boost::mutex> lk(_entryMapMutex);
+ stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
EntryMap::const_iterator iter = _entryMap.find(db.toString());
if (iter != _entryMap.end()) {
return iter->second;
@@ -271,7 +271,7 @@ namespace {
storageGlobalParams.directoryperdb,
false);
- boost::lock_guard<boost::mutex> lk(_entryMapMutex);
+ stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
// Sanity check that we are not overwriting something
invariant(_entryMap.insert(EntryMap::value_type(db.toString(), entry)).second);
@@ -285,7 +285,7 @@ namespace {
// global journal entries occur, which happen to have write intents for the removed files.
getDur().syncDataAndTruncateJournal(txn);
- boost::lock_guard<boost::mutex> lk( _entryMapMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entryMapMutex );
MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
delete entry;
_entryMap.erase( db.toString() );
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index 461a1ae3108..4141794c426 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -32,10 +32,9 @@
#include <map>
-#include <boost/thread/mutex.hpp>
-
-#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/mmap_v1/record_access_tracker.h"
+#include "mongo/db/storage/storage_engine.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -92,7 +91,7 @@ namespace mongo {
static void _listDatabases( const std::string& directory,
std::vector<std::string>* out );
- boost::mutex _entryMapMutex;
+ stdx::mutex _entryMapMutex;
typedef std::map<std::string,MMAPV1DatabaseCatalogEntry*> EntryMap;
EntryMap _entryMap;
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index d96b8f69b88..ed4f160e1a9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -659,7 +659,7 @@ namespace mongo {
}
void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
- boost::lock_guard<boost::mutex> lk(_writersMutex);
+ stdx::lock_guard<stdx::mutex> lk(_writersMutex);
const int n = _size.load();
invariant(n < DiskLoc::MaxFiles);
// Note ordering: _size update must come after updating the _files array
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index 2ce46d43584..2b57dc4672b 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -162,10 +162,10 @@ namespace mongo {
LockMongoFilesShared::assertExclusivelyLocked();
// Prevent flush and close from concurrently running
- boost::lock_guard<boost::mutex> lk(_flushMutex);
+ stdx::lock_guard<stdx::mutex> lk(_flushMutex);
{
- boost::lock_guard<boost::mutex> lk(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
UnmapViewOfFile(*i);
@@ -187,7 +187,7 @@ namespace mongo {
void* MemoryMappedFile::createReadOnlyMap() {
verify( maphandle );
- boost::lock_guard<boost::mutex> lk(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
void* readOnlyMapAddress = NULL;
int current_retry = 0;
@@ -299,7 +299,7 @@ namespace mongo {
void *view = 0;
{
- boost::lock_guard<boost::mutex> lk(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
int current_retry = 0;
@@ -364,7 +364,7 @@ namespace mongo {
void* MemoryMappedFile::createPrivateMap() {
verify( maphandle );
- boost::lock_guard<boost::mutex> lk(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
LPVOID thisAddress = getNextMemoryMappedFileLocation( len );
@@ -412,7 +412,7 @@ namespace mongo {
privateViews.clearWritableBits(oldPrivateAddr, len);
- boost::lock_guard<boost::mutex> lk(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD dosError = GetLastError();
@@ -448,7 +448,7 @@ namespace mongo {
HANDLE fd,
const uint64_t id,
const std::string& filename,
- boost::mutex& flushMutex )
+ stdx::mutex& flushMutex )
: _theFile(theFile), _view(view), _fd(fd), _id(id), _filename(filename),
_flushMutex(flushMutex)
{}
@@ -471,7 +471,7 @@ namespace mongo {
_flushMutex.lock();
}
- boost::lock_guard<boost::mutex> lk(_flushMutex, boost::adopt_lock_t());
+ stdx::lock_guard<stdx::mutex> lk(_flushMutex, stdx::adopt_lock);
int loopCount = 0;
bool success = false;
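
Here the mutex is acquired manually a few lines earlier (_flushMutex.lock()) and then handed to a guard with adopt_lock, so unlocking still happens automatically on every exit path. A sketch of that hand-off, with hypothetical names:

    #include <mutex>

    std::mutex flushMutex;  // hypothetical

    void flushAdopting() {
        flushMutex.lock();  // acquired manually (the real code may have
                            // tried a timed acquire first)

        // adopt_lock tells the guard the mutex is already held; the
        // guard's only job is to unlock on scope exit, exceptions included.
        std::lock_guard<std::mutex> lk(flushMutex, std::adopt_lock);

        // ... flush work ...
    }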
@@ -520,7 +520,7 @@ namespace mongo {
HANDLE _fd;
const uint64_t _id;
string _filename;
- boost::mutex& _flushMutex;
+ stdx::mutex& _flushMutex;
};
void MemoryMappedFile::flush(bool sync) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index d7df41b8098..f123ff1fffd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -342,7 +342,7 @@ namespace mongo {
if ( ret == EBUSY ) {
// this is expected, queue it up
{
- boost::lock_guard<boost::mutex> lk( _identToDropMutex );
+ stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
_identToDrop.insert( uri );
}
_sessionCache->closeAll();
@@ -358,14 +358,14 @@ namespace mongo {
_sizeStorerSyncTracker.resetLastTime();
syncSizeInfo(false);
}
- boost::lock_guard<boost::mutex> lk( _identToDropMutex );
+ stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
return !_identToDrop.empty();
}
void WiredTigerKVEngine::dropAllQueued() {
set<string> mine;
{
- boost::lock_guard<boost::mutex> lk( _identToDropMutex );
+ stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
mine = _identToDrop;
}
@@ -393,7 +393,7 @@ namespace mongo {
}
{
- boost::lock_guard<boost::mutex> lk( _identToDropMutex );
+ stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
for ( set<string>::const_iterator it = deleted.begin(); it != deleted.end(); ++it ) {
_identToDrop.erase( *it );
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 739084bdd06..dd40faa4ccf 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -34,13 +34,12 @@
#include <set>
#include <string>
-#include <boost/thread/mutex.hpp>
-
#include <wiredtiger.h>
#include "mongo/bson/ordering.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/elapsed_tracker.h"
namespace mongo {
@@ -147,7 +146,7 @@ namespace mongo {
std::string _indexOptions;
std::set<std::string> _identToDrop;
- mutable boost::mutex _identToDropMutex;
+ mutable stdx::mutex _identToDropMutex;
std::unique_ptr<WiredTigerSizeStorer> _sizeStorer;
std::string _sizeStorerUri;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index c3f2c307666..7ac2a7ced80 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -449,7 +449,7 @@ namespace {
WiredTigerRecordStore::~WiredTigerRecordStore() {
{
- boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
+ stdx::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
_shuttingDown = true;
}
@@ -464,7 +464,7 @@ namespace {
}
bool WiredTigerRecordStore::inShutdown() const {
- boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
+ stdx::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
return _shuttingDown;
}
@@ -591,7 +591,7 @@ namespace {
return 0;
// ensure only one thread at a time can do deletes, otherwise they'll conflict.
- boost::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, boost::defer_lock);
+ stdx::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, stdx::defer_lock);
if (_cappedMaxDocs != -1) {
lock.lock(); // Max docs has to be exact, so have to check every time.
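
defer_lock constructs the wrapper without acquiring, letting the code pick a locking policy afterwards: block when the max-docs count must be exact, otherwise merely try. A standalone sketch of that choice (hypothetical names):

    #include <mutex>

    std::timed_mutex cappedDeleterMutex;  // hypothetical
    bool maxDocsIsExact = true;           // stands in for _cappedMaxDocs != -1

    int cappedDeleteAsNeeded() {
        // defer_lock: construct the wrapper without acquiring.
        std::unique_lock<std::timed_mutex> lock(cappedDeleterMutex,
                                                std::defer_lock);
        if (maxDocsIsExact) {
            lock.lock();                  // must serialize: block if needed
        } else if (!lock.try_lock()) {
            return 0;                     // another thread is deleting; skip
        }
        // ... perform deletes while the lock is held ...
        return 1;
    }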
@@ -772,14 +772,14 @@ namespace {
return status;
loc = status.getValue();
if ( loc > _oplog_highestSeen ) {
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
if ( loc > _oplog_highestSeen ) {
_oplog_highestSeen = loc;
}
}
}
else if ( _isCapped ) {
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
loc = _nextId();
_addUncommitedDiskLoc_inlock( txn, loc );
}
@@ -809,7 +809,7 @@ namespace {
}
void WiredTigerRecordStore::dealtWithCappedLoc( const RecordId& loc ) {
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
SortedDiskLocs::iterator it = std::find(_uncommittedDiskLocs.begin(),
_uncommittedDiskLocs.end(),
loc);
@@ -818,7 +818,7 @@ namespace {
}
bool WiredTigerRecordStore::isCappedHidden( const RecordId& loc ) const {
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
if (_uncommittedDiskLocs.empty()) {
return false;
}
@@ -882,7 +882,7 @@ namespace {
}
void WiredTigerRecordStore::_oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const {
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
if ( _uncommittedDiskLocs.empty() ) {
wru->setOplogReadTill( _oplog_highestSeen );
}
@@ -1072,7 +1072,7 @@ namespace {
if ( !loc.isOK() )
return loc.getStatus();
- boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
+ stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
_addUncommitedDiskLoc_inlock( txn, loc.getValue() );
return Status::OK();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 959c9583a3a..8fc785963c5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -34,12 +34,11 @@
#include <set>
#include <string>
-#include <boost/thread/mutex.hpp>
-
#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/capped_callback.h"
+#include "mongo/db/storage/record_store.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point_service.h"
/**
@@ -254,7 +253,7 @@ namespace mongo {
SortedDiskLocs _uncommittedDiskLocs;
RecordId _oplog_visibleTo;
RecordId _oplog_highestSeen;
- mutable boost::mutex _uncommittedDiskLocsMutex;
+ mutable stdx::mutex _uncommittedDiskLocsMutex;
AtomicInt64 _nextIdNum;
AtomicInt64 _dataSize;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index 22829ecc6ac..0d5314a323e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -31,7 +31,6 @@
#include "mongo/platform/basic.h"
-#include <boost/thread/mutex.hpp>
#include <set>
#include "mongo/base/checked_cast.h"
@@ -40,12 +39,13 @@
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
@@ -55,7 +55,7 @@ namespace mongo {
namespace {
std::set<NamespaceString> _backgroundThreadNamespaces;
- boost::mutex _backgroundThreadMutex;
+ stdx::mutex _backgroundThreadMutex;
class WiredTigerRecordStoreThread : public BackgroundJob {
public:
@@ -101,7 +101,7 @@ namespace mongo {
WiredTigerRecordStore* rs =
checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
WriteUnitOfWork wuow(&txn);
- boost::lock_guard<boost::timed_mutex> lock(rs->cappedDeleterMutex());
+ stdx::lock_guard<boost::timed_mutex> lock(rs->cappedDeleterMutex());
int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
wuow.commit();
return removed;
@@ -155,7 +155,7 @@ namespace mongo {
return false;
}
- boost::lock_guard<boost::mutex> lock(_backgroundThreadMutex);
+ stdx::lock_guard<stdx::mutex> lock(_backgroundThreadMutex);
NamespaceString nss(ns);
if (_backgroundThreadNamespaces.count(nss)) {
log() << "WiredTigerRecordStoreThread " << ns << " already started";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index aeb220ebfbb..19a128b7b98 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -31,7 +31,6 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
#include "mongo/base/checked_cast.h"
#include "mongo/base/init.h"
@@ -41,6 +40,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -56,14 +56,14 @@ namespace mongo {
}
void syncHappend() {
- boost::lock_guard<boost::mutex> lk( mutex );
+ stdx::lock_guard<stdx::mutex> lk( mutex );
lastSyncTime++;
condvar.notify_all();
}
// returns true if a sync happened
bool waitUntilDurable() {
- boost::unique_lock<boost::mutex> lk( mutex );
+ stdx::unique_lock<stdx::mutex> lk( mutex );
long long start = lastSyncTime;
numWaitingForSync.fetchAndAdd(1);
condvar.timed_wait(lk,boost::posix_time::milliseconds(50));
@@ -73,7 +73,7 @@ namespace mongo {
AtomicUInt32 numWaitingForSync;
- boost::mutex mutex; // this just protects lastSyncTime
+ stdx::mutex mutex; // this just protects lastSyncTime
boost::condition condvar;
long long lastSyncTime;
} waitUntilDurableData;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 7673bcddbac..92c5415a67a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -145,7 +145,7 @@ namespace mongo {
// This ensures that any calls currently inside getSession/releaseSession
// will be able to complete before we start cleaning up the pool. Any others,
// which are about to enter, will return immediately because _shuttingDown == true.
- boost::lock_guard<boost::shared_mutex> lk(_shutdownLock);
+ stdx::lock_guard<boost::shared_mutex> lk(_shutdownLock);
}
closeAll();
@@ -156,7 +156,7 @@ namespace mongo {
SessionPool swapPool;
{
- boost::unique_lock<SpinLock> scopedLock(_cache[i].lock);
+ stdx::unique_lock<SpinLock> scopedLock(_cache[i].lock);
_cache[i].pool.swap(swapPool);
_cache[i].epoch++;
}
@@ -183,7 +183,7 @@ namespace mongo {
int epoch;
{
- boost::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
+ stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
epoch = _cache[cachePartition].epoch;
if (!_cache[cachePartition].pool.empty()) {
@@ -224,7 +224,7 @@ namespace mongo {
bool returnedToCache = false;
if (cachePartition >= 0) {
- boost::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
+ stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
invariant(session->_getEpoch() <= _cache[cachePartition].epoch);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index 7bf62d6efe7..2f9e8d64d4d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -35,11 +35,10 @@
#include <string>
#include <vector>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/shared_mutex.hpp>
-
#include <wiredtiger.h>
+#include "mongo/stdx/mutex.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/util/concurrency/spin_lock.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 4ce57cf5104..5310fb6836a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -74,7 +74,7 @@ namespace mongo {
WiredTigerSizeStorer::~WiredTigerSizeStorer() {
// This shouldn't be necessary, but protects us if we screw up.
- boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
+ stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
_magic = 11111;
_cursor->close(_cursor);
@@ -90,7 +90,7 @@ namespace mongo {
void WiredTigerSizeStorer::onCreate( WiredTigerRecordStore* rs,
long long numRecords, long long dataSize ) {
_checkMagic();
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
Entry& entry = _entries[rs->getURI()];
entry.rs = rs;
entry.numRecords = numRecords;
@@ -100,7 +100,7 @@ namespace mongo {
void WiredTigerSizeStorer::onDestroy( WiredTigerRecordStore* rs ) {
_checkMagic();
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
Entry& entry = _entries[rs->getURI()];
entry.numRecords = rs->numRecords( NULL );
entry.dataSize = rs->dataSize( NULL );
@@ -112,7 +112,7 @@ namespace mongo {
void WiredTigerSizeStorer::storeToCache( StringData uri,
long long numRecords, long long dataSize ) {
_checkMagic();
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
Entry& entry = _entries[uri.toString()];
entry.numRecords = numRecords;
entry.dataSize = dataSize;
@@ -122,7 +122,7 @@ namespace mongo {
void WiredTigerSizeStorer::loadFromCache( StringData uri,
long long* numRecords, long long* dataSize ) const {
_checkMagic();
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
Map::const_iterator it = _entries.find( uri.toString() );
if ( it == _entries.end() ) {
*numRecords = 0;
@@ -134,7 +134,7 @@ namespace mongo {
}
void WiredTigerSizeStorer::fillCache() {
- boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
+ stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
_checkMagic();
Map m;
@@ -166,17 +166,17 @@ namespace mongo {
}
}
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
_entries.swap(m);
}
void WiredTigerSizeStorer::syncCache(bool syncToDisk) {
- boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
+ stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
_checkMagic();
Map myMap;
{
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
for ( Map::iterator it = _entries.begin(); it != _entries.end(); ++it ) {
std::string uriKey = it->first;
Entry& entry = it->second;
@@ -231,7 +231,7 @@ namespace mongo {
invariantWTOK(session->commit_transaction(session, NULL));
{
- boost::lock_guard<boost::mutex> lk( _entriesMutex );
+ stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
for (Map::iterator it = _entries.begin(); it != _entries.end(); ++it) {
it->second.dirty = false;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 5d856b705dc..488696424a0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -31,13 +31,13 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <map>
#include <string>
#include <wiredtiger.h>
#include "mongo/base/string_data.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -80,13 +80,13 @@ namespace mongo {
int _magic;
// Guards _cursor. Acquire *before* _entriesMutex.
- mutable boost::mutex _cursorMutex;
+ mutable stdx::mutex _cursorMutex;
const WiredTigerSession _session;
WT_CURSOR* _cursor; // pointer is const after constructor
typedef std::map<std::string,Entry> Map;
Map _entries;
- mutable boost::mutex _entriesMutex;
+ mutable stdx::mutex _entriesMutex;
};
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 07e4d2f840b..8713b8cd177 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -287,12 +287,12 @@ namespace DocumentSourceTests {
public:
PendingValue( int initialValue ) : _value( initialValue ) {}
void set( int newValue ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_value = newValue;
_condition.notify_all();
}
void await( int expectedValue ) const {
- boost::unique_lock<boost::mutex> lk( _mutex );
+ stdx::unique_lock<stdx::mutex> lk( _mutex );
while( _value != expectedValue ) {
_condition.wait( lk );
}
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 3b405853665..dab30d47f74 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -78,7 +78,7 @@ namespace mongo {
std::string lastRunningTestName, currentTestName;
{
- boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
+ stdx::lock_guard<stdx::mutex> lk( globalCurrentTestNameMutex );
lastRunningTestName = globalCurrentTestName;
}
@@ -87,7 +87,7 @@ namespace mongo {
minutesRunning++;
{
- boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
+ stdx::lock_guard<stdx::mutex> lk( globalCurrentTestNameMutex );
currentTestName = globalCurrentTestName;
}
@@ -157,6 +157,6 @@ namespace ntservice {
} // namespace mongo
void mongo::unittest::onCurrentTestNameChange( const std::string &testName ) {
- boost::lock_guard<boost::mutex> lk( mongo::dbtests::globalCurrentTestNameMutex );
+ stdx::lock_guard<stdx::mutex> lk( mongo::dbtests::globalCurrentTestNameMutex );
mongo::dbtests::globalCurrentTestName = testName;
}
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp
index 67b27cb2316..ad6cacb760c 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.cpp
+++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp
@@ -57,7 +57,7 @@ namespace mongo {
}
void MockConnRegistry::addServer(MockRemoteDBServer* server) {
- boost::lock_guard<boost::mutex> sl(_registryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_registryMutex);
const std::string hostName(server->getServerAddress());
fassert(16533, _registry.count(hostName) == 0);
@@ -66,17 +66,17 @@ namespace mongo {
}
bool MockConnRegistry::removeServer(const std::string& hostName) {
- boost::lock_guard<boost::mutex> sl(_registryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_registryMutex);
return _registry.erase(hostName) == 1;
}
void MockConnRegistry::clear() {
- boost::lock_guard<boost::mutex> sl(_registryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_registryMutex);
_registry.clear();
}
MockDBClientConnection* MockConnRegistry::connect(const std::string& connStr) {
- boost::lock_guard<boost::mutex> sl(_registryMutex);
+ stdx::lock_guard<stdx::mutex> sl(_registryMutex);
fassert(16534, _registry.count(connStr) == 1);
return new MockDBClientConnection(_registry[connStr], true);
}
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 012f8bfab94..a89930d016a 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -534,7 +534,7 @@ namespace PerfTests {
RWLock lk("testrw");
SimpleMutex m("simptst");
- boost::mutex mboost;
+ stdx::mutex mboost;
boost::timed_mutex mboost_timed;
std::mutex mstd;
std::timed_mutex mstd_timed;
@@ -552,11 +552,11 @@ namespace PerfTests {
};
class boostmutexspeed : public B {
public:
- string name() { return "boost::mutex"; }
+ string name() { return "stdx::mutex"; }
virtual int howLongMillis() { return 500; }
virtual bool showDurStats() { return false; }
void timed() {
- boost::lock_guard<boost::mutex> lk(mboost);
+ stdx::lock_guard<stdx::mutex> lk(mboost);
}
};
class boosttimed_mutexspeed : public B {
@@ -565,7 +565,7 @@ namespace PerfTests {
virtual int howLongMillis() { return 500; }
virtual bool showDurStats() { return false; }
void timed() {
- boost::lock_guard<boost::timed_mutex> lk(mboost_timed);
+ stdx::lock_guard<boost::timed_mutex> lk(mboost_timed);
}
};
class simplemutexspeed : public B {
@@ -645,7 +645,7 @@ namespace PerfTests {
boost::thread_specific_ptr<ResourceId> resId;
boost::thread_specific_ptr<MMAPV1LockerImpl> locker;
boost::thread_specific_ptr<int> id;
- boost::mutex lock;
+ stdx::mutex lock;
// The following members are initialized in the constructor
LockMode lockMode;
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 3ef74711c04..b0f5c60ab9c 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -757,14 +757,14 @@ namespace ThreadedTests {
Hotel( int nRooms ) : _nRooms( nRooms ), _checkedIn( 0 ), _maxRooms( 0 ) {}
void checkIn(){
- boost::lock_guard<boost::mutex> lk( _frontDesk );
+ stdx::lock_guard<stdx::mutex> lk( _frontDesk );
_checkedIn++;
verify( _checkedIn <= _nRooms );
if( _checkedIn > _maxRooms ) _maxRooms = _checkedIn;
}
void checkOut(){
- boost::lock_guard<boost::mutex> lk( _frontDesk );
+ stdx::lock_guard<stdx::mutex> lk( _frontDesk );
_checkedIn--;
verify( _checkedIn >= 0 );
}
diff --git a/src/mongo/executor/network_interface_impl.cpp b/src/mongo/executor/network_interface_impl.cpp
index b2475e4028a..95d51e7c264 100644
--- a/src/mongo/executor/network_interface_impl.cpp
+++ b/src/mongo/executor/network_interface_impl.cpp
@@ -67,7 +67,7 @@ namespace {
NetworkInterfaceImpl::~NetworkInterfaceImpl() { }
std::string NetworkInterfaceImpl::getDiagnosticString() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
str::stream output;
output << "NetworkImpl";
output << " threads:" << _threads.size();
@@ -107,7 +107,7 @@ namespace {
}
void NetworkInterfaceImpl::startup() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_inShutdown);
if (!_threads.empty()) {
return;
@@ -119,7 +119,7 @@ namespace {
void NetworkInterfaceImpl::shutdown() {
using std::swap;
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_inShutdown = true;
_hasPending.notify_all();
ThreadList threadsToJoin;
@@ -132,7 +132,7 @@ namespace {
}
void NetworkInterfaceImpl::signalWorkAvailable() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_signalWorkAvailable_inlock();
}
@@ -144,7 +144,7 @@ namespace {
}
void NetworkInterfaceImpl::waitForWork() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (!_isExecutorRunnable) {
_isExecutorRunnableCondition.wait(lk);
}
@@ -152,7 +152,7 @@ namespace {
}
void NetworkInterfaceImpl::waitForWorkUntil(Date_t when) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (!_isExecutorRunnable) {
const Milliseconds waitTime(when - now());
if (waitTime <= Milliseconds(0)) {
@@ -177,7 +177,7 @@ namespace {
}
void NetworkInterfaceImpl::_consumeNetworkRequests() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (!_inShutdown) {
if (_pending.empty()) {
if (_threads.size() > kMinThreads) {
@@ -232,7 +232,7 @@ namespace {
const RemoteCommandCompletionFn& onFinish) {
LOG(2) << "Scheduling " << request.cmdObj.firstElementFieldName() << " to " <<
request.target;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_pending.push_back(CommandData());
CommandData& cd = _pending.back();
cd.cbHandle = cbHandle;
@@ -249,7 +249,7 @@ namespace {
void NetworkInterfaceImpl::cancelCommand(
const TaskExecutor::CallbackHandle& cbHandle) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
CommandDataList::iterator iter;
for (iter = _pending.begin(); iter != _pending.end(); ++iter) {
if (iter->cbHandle == cbHandle) {
diff --git a/src/mongo/executor/network_interface_impl.h b/src/mongo/executor/network_interface_impl.h
index 1d513ba6308..14a421cc8a9 100644
--- a/src/mongo/executor/network_interface_impl.h
+++ b/src/mongo/executor/network_interface_impl.h
@@ -31,12 +31,12 @@
#include <boost/thread.hpp>
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <vector>
#include "mongo/client/remote_command_runner_impl.h"
#include "mongo/executor/network_interface.h"
#include "mongo/stdx/list.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
namespace executor {
@@ -121,7 +121,7 @@ namespace executor {
// Mutex guarding the state of this network interface, except for the remote command
// executor, which has its own concurrency control.
- boost::mutex _mutex;
+ stdx::mutex _mutex;
// Condition signaled to indicate that there is work in the _pending queue.
boost::condition_variable _hasPending;
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index f3f5c661c7c..6f13f42afd5 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -46,7 +46,7 @@ namespace executor {
}
NetworkInterfaceMock::~NetworkInterfaceMock() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(!_hasStarted || _inShutdown);
invariant(_scheduled.empty());
invariant(_blackHoled.empty());
@@ -58,7 +58,7 @@ namespace executor {
}
Date_t NetworkInterfaceMock::now() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _now_inlock();
}
@@ -67,7 +67,7 @@ namespace executor {
const RemoteCommandRequest& request,
const RemoteCommandCompletionFn& onFinish) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_inShutdown);
const Date_t now = _now_inlock();
NetworkOperationIterator insertBefore = _unscheduled.begin();
@@ -97,7 +97,7 @@ namespace executor {
void NetworkInterfaceMock::cancelCommand(
const TaskExecutor::CallbackHandle& cbHandle) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_inShutdown);
stdx::function<bool (const NetworkOperation&)> matchesHandle = stdx::bind(
&NetworkOperation::isForCallback,
@@ -117,7 +117,7 @@ namespace executor {
}
void NetworkInterfaceMock::startup() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_hasStarted);
_hasStarted = true;
_inShutdown = false;
@@ -126,7 +126,7 @@ namespace executor {
}
void NetworkInterfaceMock::shutdown() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_hasStarted);
invariant(!_inShutdown);
_inShutdown = true;
@@ -152,7 +152,7 @@ namespace executor {
}
void NetworkInterfaceMock::enterNetwork() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (!_isNetworkThreadRunnable_inlock()) {
_shouldWakeNetworkCondition.wait(lk);
}
@@ -161,7 +161,7 @@ namespace executor {
}
void NetworkInterfaceMock::exitNetwork() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_currentlyRunning != kNetworkThread) {
return;
}
@@ -173,7 +173,7 @@ namespace executor {
}
bool NetworkInterfaceMock::hasReadyRequests() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
return _hasReadyRequests_inlock();
}
@@ -188,7 +188,7 @@ namespace executor {
}
NetworkInterfaceMock::NetworkOperationIterator NetworkInterfaceMock::getNextReadyRequest() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
while (!_hasReadyRequests_inlock()) {
_waitingToRunMask |= kExecutorThread;
@@ -204,7 +204,7 @@ namespace executor {
Date_t when,
const TaskExecutor::ResponseStatus& response) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
NetworkOperationIterator insertBefore = _scheduled.begin();
while ((insertBefore != _scheduled.end()) && (insertBefore->getResponseDate() <= when)) {
@@ -215,13 +215,13 @@ namespace executor {
}
void NetworkInterfaceMock::blackHole(NetworkOperationIterator noi) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
_blackHoled.splice(_blackHoled.end(), _processing, noi);
}
void NetworkInterfaceMock::requeueAt(NetworkOperationIterator noi, Date_t dontAskUntil) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(noi->getNextConsiderationDate() < dontAskUntil);
invariant(_now_inlock() < dontAskUntil);
@@ -236,7 +236,7 @@ namespace executor {
}
void NetworkInterfaceMock::runUntil(Date_t until) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(until > _now_inlock());
while (until > _now_inlock()) {
@@ -259,19 +259,19 @@ namespace executor {
}
void NetworkInterfaceMock::runReadyNetworkOperations() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
_runReadyNetworkOperations_inlock(&lk);
}
void NetworkInterfaceMock::waitForWork() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kExecutorThread);
_waitForWork_inlock(&lk);
}
void NetworkInterfaceMock::waitForWorkUntil(Date_t when) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_currentlyRunning == kExecutorThread);
_executorNextWakeupDate = when;
if (_executorNextWakeupDate <= _now_inlock()) {
@@ -281,7 +281,7 @@ namespace executor {
}
void NetworkInterfaceMock::signalWorkAvailable() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_waitingToRunMask |= kExecutorThread;
if (_currentlyRunning == kNoThread) {
_shouldWakeExecutorCondition.notify_one();
@@ -289,7 +289,7 @@ namespace executor {
}
void NetworkInterfaceMock::_runReadyNetworkOperations_inlock(
- boost::unique_lock<boost::mutex>* lk) {
+ stdx::unique_lock<stdx::mutex>* lk) {
while (!_scheduled.empty() && _scheduled.front().getResponseDate() <= _now_inlock()) {
invariant(_currentlyRunning == kNetworkThread);
NetworkOperation op = _scheduled.front();
@@ -312,7 +312,7 @@ namespace executor {
_waitingToRunMask &= ~kNetworkThread;
}
- void NetworkInterfaceMock::_waitForWork_inlock(boost::unique_lock<boost::mutex>* lk) {
+ void NetworkInterfaceMock::_waitForWork_inlock(stdx::unique_lock<stdx::mutex>* lk) {
if (_waitingToRunMask & kExecutorThread) {
_waitingToRunMask &= ~kExecutorThread;
return;
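
The mock passes the already-held lock by pointer into its *_inlock helpers, which may wait on a condition variable and therefore release and reacquire the mutex. A minimal sketch of that convention (hypothetical names):

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    // "_inlock" convention: the caller holds *lk on entry and on return,
    // but the helper may wait(), which releases and reacquires the mutex.
    void waitForWorkInlock(std::unique_lock<std::mutex>* lk) {
        while (!ready)
            cv.wait(*lk);
    }

    void waitForWork() {
        std::unique_lock<std::mutex> lk(m);
        waitForWorkInlock(&lk);  // the pointer makes the lock hand-off explicit
    }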
diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h
index 8002e2adfee..d7709300bd5 100644
--- a/src/mongo/executor/network_interface_mock.h
+++ b/src/mongo/executor/network_interface_mock.h
@@ -28,12 +28,12 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <map>
#include "mongo/executor/network_interface.h"
#include "mongo/stdx/list.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -180,7 +180,7 @@ namespace executor {
/**
* Implementation of waitForWork*.
*/
- void _waitForWork_inlock(boost::unique_lock<boost::mutex>* lk);
+ void _waitForWork_inlock(stdx::unique_lock<stdx::mutex>* lk);
/**
* Returns true if there are ready requests for the network thread to service.
@@ -202,12 +202,12 @@ namespace executor {
* reacquire "lk" several times, but will not return until the executor has blocked
* in waitFor*.
*/
- void _runReadyNetworkOperations_inlock(boost::unique_lock<boost::mutex>* lk);
+ void _runReadyNetworkOperations_inlock(stdx::unique_lock<stdx::mutex>* lk);
// Mutex that synchronizes access to mutable data in this class and its subclasses.
// Fields guarded by the mutex are labeled (M), below, and those that are read-only
// in multi-threaded execution, and so unsynchronized, are labeled (R).
- boost::mutex _mutex;
+ stdx::mutex _mutex;
// Condition signaled to indicate that the network processing thread should wake up.
boost::condition_variable _shouldWakeNetworkCondition; // (M)
diff --git a/src/mongo/logger/console.cpp b/src/mongo/logger/console.cpp
index f8874855337..19ac32793e7 100644
--- a/src/mongo/logger/console.cpp
+++ b/src/mongo/logger/console.cpp
@@ -43,13 +43,13 @@ namespace {
*
* At process start, the loader initializes "consoleMutex" to NULL. At some point during static
* initialization, the static initialization process, running in the one and only extant thread,
- * allocates a new boost::mutex on the heap and assigns consoleMutex to point to it. While
+ * allocates a new stdx::mutex on the heap and assigns consoleMutex to point to it. While
* consoleMutex is still NULL, we know that there is only one thread extant, so it is safe to
* skip locking the consoleMutex in the Console constructor. Once the mutex is initialized,
* users of Console can start acquiring it.
*/
- boost::mutex *consoleMutex = new boost::mutex;
+ stdx::mutex *consoleMutex = new stdx::mutex;
#if defined(_WIN32)
/**
@@ -235,7 +235,7 @@ std::ostream* windowsOutputStream = getWindowsOutputStream();
Console::Console() : _consoleLock() {
if (consoleMutex) {
- boost::unique_lock<boost::mutex> lk(*consoleMutex);
+ stdx::unique_lock<stdx::mutex> lk(*consoleMutex);
lk.swap(_consoleLock);
}
}
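
The constructor above acquires the heap-allocated mutex with a local unique_lock and then swaps it into the member, so the mutex stays held for the Console object's whole lifetime. The same pattern in plain std:: types:

    #include <iostream>
    #include <mutex>

    // Heap-allocated and never freed, so it is safely usable (or testably
    // NULL) at any point during static initialization and shutdown.
    std::mutex* consoleMutex = new std::mutex;

    class Console {
    public:
        Console() : _lock() {
            if (consoleMutex) {
                std::unique_lock<std::mutex> lk(*consoleMutex);
                lk.swap(_lock);  // ownership moves into the member; the
                                 // mutex stays held until ~Console()
            }
        }
        std::ostream& out() { return std::cout; }

    private:
        std::unique_lock<std::mutex> _lock;  // default-constructed: owns nothing
    };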
diff --git a/src/mongo/logger/console.h b/src/mongo/logger/console.h
index 4392e2ad74d..b86cb7f984f 100644
--- a/src/mongo/logger/console.h
+++ b/src/mongo/logger/console.h
@@ -27,9 +27,10 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <iosfwd>
+#include "mongo/stdx/mutex.h"
+
namespace mongo {
/**
@@ -53,7 +54,7 @@ namespace mongo {
std::ostream& out();
private:
- boost::unique_lock<boost::mutex> _consoleLock;
+ stdx::unique_lock<stdx::mutex> _consoleLock;
};
} // namespace mongo
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index 0ea9e324d96..30e623983d6 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -59,7 +59,7 @@ namespace {
RamLog::~RamLog() {}
void RamLog::write(const std::string& str) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_lastWrite = time(0);
_totalLinesWritten++;
@@ -209,7 +209,7 @@ namespace {
_namedLock = new mongo::mutex();
}
- boost::lock_guard<boost::mutex> lk( *_namedLock );
+ stdx::lock_guard<stdx::mutex> lk( *_namedLock );
if (!_named) {
// Guaranteed to happen before multi-threaded operation.
_named = new RM();
@@ -226,7 +226,7 @@ namespace {
RamLog* RamLog::getIfExists(const std::string& name) {
if (!_named)
return NULL;
- boost::lock_guard<boost::mutex> lk(*_namedLock);
+ stdx::lock_guard<stdx::mutex> lk(*_namedLock);
return mapFindWithDefault(*_named, name, static_cast<RamLog*>(NULL));
}
@@ -234,7 +234,7 @@ namespace {
if ( ! _named )
return;
- boost::lock_guard<boost::mutex> lk( *_namedLock );
+ stdx::lock_guard<stdx::mutex> lk( *_namedLock );
for ( RM::iterator i=_named->begin(); i!=_named->end(); ++i ) {
if ( i->second->n )
names.push_back( i->first );
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index 39aecf6b54c..60e401e92f1 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -34,17 +34,14 @@
#include <vector>
#include <boost/version.hpp>
-#if BOOST_VERSION >= 105300
-#include <boost/thread/lock_guard.hpp>
-#endif
-
#include "mongo/base/disallow_copying.h"
-#include "mongo/base/string_data.h"
#include "mongo/base/status.h"
-#include "mongo/util/concurrency/mutex.h"
+#include "mongo/base/string_data.h"
#include "mongo/logger/appender.h"
#include "mongo/logger/message_event.h"
#include "mongo/logger/tee.h"
+#include "mongo/stdx/mutex.h"
+#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -121,7 +118,7 @@ namespace mongo {
const char* getLine_inlock(unsigned lineNumber) const;
- boost::mutex _mutex; // Guards all non-static data.
+ stdx::mutex _mutex; // Guards all non-static data.
char lines[N][C];
unsigned h; // current position
unsigned n; // number of lines stored, 0 to N
@@ -168,7 +165,7 @@ namespace mongo {
private:
const RamLog* _ramlog;
- boost::lock_guard<boost::mutex> _lock;
+ stdx::lock_guard<stdx::mutex> _lock;
unsigned _nextLineIndex;
};
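
Holding a stdx::lock_guard as a data member, as the iterator above does, pins the log's mutex for the iterator's whole lifetime instead of a single call, so callers read a stable snapshot. A sketch of the shape (hypothetical Log type):

    #include <mutex>
    #include <string>
    #include <vector>

    class Log {
    public:
        class Iterator {
        public:
            // Locks on construction, unlocks on destruction.
            explicit Iterator(const Log& log)
                : _log(&log), _lock(log._mutex), _next(0) {}

            bool more() const { return _next < _log->_lines.size(); }
            const std::string& next() { return _log->_lines[_next++]; }

        private:
            const Log* _log;
            std::lock_guard<std::mutex> _lock;  // a member, not a local
            size_t _next;
        };

    private:
        mutable std::mutex _mutex;  // mutable: locked even through const refs
        std::vector<std::string> _lines;
    };
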
diff --git a/src/mongo/logger/rotatable_file_writer.h b/src/mongo/logger/rotatable_file_writer.h
index aee5af4ed42..c6ad0c364d7 100644
--- a/src/mongo/logger/rotatable_file_writer.h
+++ b/src/mongo/logger/rotatable_file_writer.h
@@ -27,11 +27,11 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <string>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
namespace logger {
@@ -109,7 +109,7 @@ namespace logger {
Status _openFileStream(bool append);
RotatableFileWriter* _writer;
- boost::unique_lock<boost::mutex> _lock;
+ stdx::unique_lock<stdx::mutex> _lock;
};
/**
@@ -119,7 +119,7 @@ namespace logger {
private:
friend class RotatableFileWriter::Use;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
std::string _fileName;
std::unique_ptr<std::ostream> _stream;
};
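
RotatableFileWriter::Use is the same idea as the iterator lock above: a scoped-access handle whose _lock member holds _mutex for as long as the handle lives. A hedged usage sketch (assuming Use is constructed from a writer pointer and exposes a stream() accessor, as this header suggests):

    #include <ostream>
    #include <string>

    #include "mongo/logger/rotatable_file_writer.h"

    // Exclusive access lasts exactly as long as 'use' is in scope.
    void writeLine(mongo::logger::RotatableFileWriter* writer,
                   const std::string& line) {
        mongo::logger::RotatableFileWriter::Use use(writer);
        use.stream() << line << '\n';  // guarded by the Use's lock
    }  // _lock releases _mutex here
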
diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp
index ec32162f69a..db27e5b7778 100644
--- a/src/mongo/s/catalog/catalog_cache.cpp
+++ b/src/mongo/s/catalog/catalog_cache.cpp
@@ -49,7 +49,7 @@ namespace mongo {
}
StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(const string& dbName) {
- boost::lock_guard<boost::mutex> guard(_mutex);
+ stdx::lock_guard<stdx::mutex> guard(_mutex);
ShardedDatabasesMap::iterator it = _databases.find(dbName);
if (it != _databases.end()) {
@@ -71,7 +71,7 @@ namespace mongo {
}
void CatalogCache::invalidate(const string& dbName) {
- boost::lock_guard<boost::mutex> guard(_mutex);
+ stdx::lock_guard<stdx::mutex> guard(_mutex);
ShardedDatabasesMap::iterator it = _databases.find(dbName);
if (it != _databases.end()) {
@@ -80,7 +80,7 @@ namespace mongo {
}
void CatalogCache::invalidateAll() {
- boost::lock_guard<boost::mutex> guard(_mutex);
+ stdx::lock_guard<stdx::mutex> guard(_mutex);
_databases.clear();
}
diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h
index 3f646100729..9d7c18cad76 100644
--- a/src/mongo/s/catalog/catalog_cache.h
+++ b/src/mongo/s/catalog/catalog_cache.h
@@ -28,11 +28,11 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <map>
#include <string>
#include "mongo/base/disallow_copying.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -81,7 +81,7 @@ namespace mongo {
CatalogManager* const _catalogManager;
// Databases catalog map and mutex to protect it
- boost::mutex _mutex;
+ stdx::mutex _mutex;
ShardedDatabasesMap _databases;
};
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index f120c0e23ec..dae7488812a 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -376,7 +376,7 @@ namespace {
_distLockManager->startUp();
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = false;
_consistentFromLastCheck = true;
}
@@ -432,7 +432,7 @@ namespace {
void CatalogManagerLegacy::shutDown() {
LOG(1) << "CatalogManagerLegacy::shutDown() called.";
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
_consistencyCheckerCV.notify_one();
}
@@ -1704,7 +1704,7 @@ namespace {
}
void CatalogManagerLegacy::_consistencyChecker() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (!_inShutdown) {
lk.unlock();
const bool isConsistent = _checkConfigServersConsistent();
@@ -1718,7 +1718,7 @@ namespace {
}
bool CatalogManagerLegacy::_isConsistentFromLastCheck() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
return _consistentFromLastCheck;
}
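
_consistencyChecker above is why _mutex pairs with a stdx::unique_lock rather than a lock_guard: the loop drops the lock around the expensive consistency probe, retakes it to publish the result, and then sleeps on the condition variable so shutDown() can wake it early. A minimal sketch of that loop (std types, hypothetical helper):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class Checker {
    public:
        void run() {
            std::unique_lock<std::mutex> lk(_mutex);
            while (!_inShutdown) {
                lk.unlock();                       // never hold across the slow call
                const bool ok = expensiveCheck();  // stand-in for the real probe
                lk.lock();                         // retake before touching state
                _consistent = ok;
                // Sleep until the next round; shutdown() notifies to wake early.
                _cv.wait_for(lk, std::chrono::seconds(60));
            }
        }

        void shutdown() {
            std::lock_guard<std::mutex> lk(_mutex);
            _inShutdown = true;
            _cv.notify_one();
        }

    private:
        bool expensiveCheck() { return true; }  // hypothetical

        std::mutex _mutex;
        std::condition_variable _cv;
        bool _inShutdown = false;
        bool _consistent = true;
    };
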
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
index 471a7727a2d..a8744e80c95 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
@@ -191,7 +191,7 @@ namespace mongo {
std::unique_ptr<DistLockManager> _distLockManager;
// protects _inShutdown, _consistentFromLastCheck; used by _consistencyCheckerCV
- boost::mutex _mutex;
+ stdx::mutex _mutex;
// True if CatalogManagerLegacy::shutDown has been called. False, otherwise.
bool _inShutdown = false;
diff --git a/src/mongo/s/catalog/legacy/distlock.cpp b/src/mongo/s/catalog/legacy/distlock.cpp
index 4bd85c52517..28b2b5065f3 100644
--- a/src/mongo/s/catalog/legacy/distlock.cpp
+++ b/src/mongo/s/catalog/legacy/distlock.cpp
@@ -129,14 +129,14 @@ namespace mongo {
DistLockPingInfo DistributedLock::LastPings::getLastPing(const ConnectionString& conn,
const string& lockName) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _lastPings[std::make_pair(conn.toString(), lockName)];
}
void DistributedLock::LastPings::setLastPing(const ConnectionString& conn,
const string& lockName,
const DistLockPingInfo& pd) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_lastPings[std::make_pair(conn.toString(), lockName)] = pd;
}
diff --git a/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.cpp b/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.cpp
index 3fffd1595b5..18cc7d537cf 100644
--- a/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.cpp
@@ -56,13 +56,13 @@ namespace {
}
void LegacyDistLockManager::startUp() {
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
invariant(!_pinger);
_pinger = stdx::make_unique<LegacyDistLockPinger>();
}
void LegacyDistLockManager::shutDown() {
- boost::unique_lock<boost::mutex> sl(_mutex);
+ stdx::unique_lock<stdx::mutex> sl(_mutex);
_isStopped = true;
while (!_lockMap.empty()) {
@@ -83,7 +83,7 @@ namespace {
auto distLock = stdx::make_unique<DistributedLock>(_configServer, name.toString());
{
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
if (_isStopped) {
return Status(ErrorCodes::LockBusy, "legacy distlock manager is stopped");
@@ -142,7 +142,7 @@ namespace {
dassert(lock.isLockIDSet());
{
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
_lockMap.insert(std::make_pair(lock.getLockID(), std::move(distLock)));
}
@@ -175,7 +175,7 @@ namespace {
unique_ptr<DistributedLock> distLock;
{
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
auto iter = _lockMap.find(lockHandle);
invariant(iter != _lockMap.end());
@@ -188,7 +188,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
if (_lockMap.empty()) {
_noLocksCV.notify_all();
}
@@ -207,7 +207,7 @@ namespace {
{
// Assumption: lockHandles are never shared across threads.
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
auto iter = _lockMap.find(lockHandle);
invariant(iter != _lockMap.end());
@@ -218,7 +218,7 @@ namespace {
}
void LegacyDistLockManager::enablePinger(bool enable) {
- boost::lock_guard<boost::mutex> sl(_mutex);
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
_pingerEnabled = enable;
}
diff --git a/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.h b/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.h
index db3debefd17..ce8b4b361a1 100644
--- a/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.h
+++ b/src/mongo/s/catalog/legacy/legacy_dist_lock_manager.h
@@ -71,7 +71,7 @@ namespace mongo {
const ConnectionString _configServer;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
boost::condition_variable _noLocksCV;
std::map<DistLockHandle, std::unique_ptr<DistributedLock>> _lockMap;
diff --git a/src/mongo/s/catalog/legacy/legacy_dist_lock_pinger.cpp b/src/mongo/s/catalog/legacy/legacy_dist_lock_pinger.cpp
index a6ac69f3162..2fcc3af1428 100644
--- a/src/mongo/s/catalog/legacy/legacy_dist_lock_pinger.cpp
+++ b/src/mongo/s/catalog/legacy/legacy_dist_lock_pinger.cpp
@@ -157,7 +157,7 @@ namespace {
// Remove old locks, if possible
// Make sure no one else is adding to this list at the same time
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
int numOldLocks = _unlockList.size();
if (numOldLocks > 0) {
@@ -241,7 +241,7 @@ namespace {
{
// Make sure we don't start multiple threads for a process id.
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return Status(ErrorCodes::ShutdownInProgress,
@@ -263,7 +263,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
boost::thread thread(stdx::bind(&LegacyDistLockPinger::distLockPingThread,
this,
conn,
@@ -280,18 +280,18 @@ namespace {
void LegacyDistLockPinger::addUnlockOID(const DistLockHandle& lockID) {
// Modifying the lock from some other thread
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_unlockList.push_back(lockID);
}
bool LegacyDistLockPinger::willUnlockOID(const DistLockHandle& lockID) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return find(_unlockList.begin(), _unlockList.end(), lockID) != _unlockList.end();
}
void LegacyDistLockPinger::stopPing(const ConnectionString& conn, const string& processId) {
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
string pingId = pingThreadId(conn, processId);
@@ -303,7 +303,7 @@ namespace {
void LegacyDistLockPinger::shutdown() {
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
_pingStoppedCV.notify_all();
}
@@ -323,7 +323,7 @@ namespace {
return true;
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return true;
@@ -335,7 +335,7 @@ namespace {
void LegacyDistLockPinger::acknowledgeStopPing(const ConnectionString& addr,
const string& processId) {
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
string pingId = pingThreadId(addr, processId);
@@ -354,7 +354,7 @@ namespace {
}
void LegacyDistLockPinger::waitTillNextPingTime(stdx::chrono::milliseconds duration) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_pingStoppedCV.wait_for(lk, duration);
}
}
diff --git a/src/mongo/s/client/multi_host_query.cpp b/src/mongo/s/client/multi_host_query.cpp
index a4fb70fffef..9da64c16f1e 100644
--- a/src/mongo/s/client/multi_host_query.cpp
+++ b/src/mongo/s/client/multi_host_query.cpp
@@ -39,7 +39,7 @@ namespace mongo {
using std::string;
using std::vector;
- typedef boost::unique_lock<boost::mutex> boost_unique_lock;
+ typedef stdx::unique_lock<stdx::mutex> boost_unique_lock;
HostThreadPool::HostThreadPool(int poolSize, bool scopeAllWork) :
_scopeAllWork(scopeAllWork), _context(new PoolContext) {
diff --git a/src/mongo/s/client/multi_host_query.h b/src/mongo/s/client/multi_host_query.h
index f50d8c3cd35..9a9585e4f88 100644
--- a/src/mongo/s/client/multi_host_query.h
+++ b/src/mongo/s/client/multi_host_query.h
@@ -29,13 +29,13 @@
#pragma once
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/thread.hpp>
#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -138,7 +138,7 @@ namespace mongo {
const Date_t timeoutAtMillis;
// Must be held to access the parent pointer below
- boost::mutex parentMutex;
+ stdx::mutex parentMutex;
// Set and unset by the parent operation on scheduling and destruction
MultiHostQueryOp* parentOp;
};
@@ -172,7 +172,7 @@ namespace mongo {
PendingMap _pending;
// Synchronizes below
- boost::mutex _resultsMutex;
+ stdx::mutex _resultsMutex;
// Current results recv'd
typedef std::map<ConnectionString, StatusWith<DBClientCursor*> > ResultMap;
@@ -247,7 +247,7 @@ namespace mongo {
const int _poolSize;
const bool _scopeAllWork;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
typedef std::map<ConnectionString, HostThreadPool*> HostPoolMap;
HostPoolMap _pools;
};
@@ -298,7 +298,7 @@ namespace mongo {
}
// Synchronizes below
- boost::mutex mutex;
+ stdx::mutex mutex;
// The scheduled work
std::deque<Callback> scheduled;
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index 079783d5f7d..80bb58fd7dc 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -67,12 +67,12 @@ namespace {
class ActiveClientConnections {
public:
void add(const ClientConnections* cc) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_clientConnections.erase(cc);
}
@@ -389,7 +389,7 @@ namespace {
BSONArrayBuilder arr(64 * 1024); // There may be quite a few threads
{
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
for (set<const ClientConnections*>::const_iterator i = _clientConnections.begin();
i != _clientConnections.end();
++i) {
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 4d6b0ebd89e..b3183dd8f78 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -32,8 +32,6 @@
#include "mongo/s/client/shard_registry.h"
-#include <boost/thread/lock_guard.hpp>
-
#include "mongo/client/connection_string.h"
#include "mongo/client/remote_command_runner_impl.h"
#include "mongo/client/remote_command_targeter.h"
@@ -43,6 +41,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/stdx/memory.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 4180d522f92..019466430e7 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -150,7 +150,7 @@ namespace mongo {
bool DBConfig::isSharded( const string& ns ) {
if ( ! _shardingEnabled )
return false;
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
return _isSharded( ns );
}
@@ -182,7 +182,7 @@ namespace mongo {
verify( _name != "config" );
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
_shardingEnabled = true;
if( save ) _save();
}
@@ -195,7 +195,7 @@ namespace mongo {
return false;
}
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
CollectionInfoMap::iterator i = _collections.find( ns );
@@ -228,7 +228,7 @@ namespace mongo {
primary.reset();
{
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
CollectionInfoMap::iterator i = _collections.find( ns );
@@ -282,7 +282,7 @@ namespace mongo {
ChunkManagerPtr oldManager;
{
- boost::lock_guard<boost::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
bool earlyReload = !_collections[ns].isSharded() && (shouldReload || forceReload);
if (earlyReload) {
@@ -323,7 +323,7 @@ namespace mongo {
invariant(newestChunk.size() == 1);
ChunkVersion v = newestChunk[0].getVersion();
if (v.equals(oldVersion)) {
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
const CollectionInfo& ci = _collections[ns];
uassert(15885,
str::stream() << "not sharded after reloading from chunks : "
@@ -343,11 +343,11 @@ namespace mongo {
unique_ptr<ChunkManager> tempChunkManager;
{
- boost::lock_guard<boost::mutex> lll ( _hitConfigServerLock );
-
+ stdx::lock_guard<stdx::mutex> lll ( _hitConfigServerLock );
+
if (!newestChunk.empty() && !forceReload) {
// If we have a target we're going for see if we've hit already
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
CollectionInfo& ci = _collections[ns];
@@ -376,8 +376,8 @@ namespace mongo {
}
}
- boost::lock_guard<boost::mutex> lk( _lock );
-
+ stdx::lock_guard<stdx::mutex> lk( _lock );
+
CollectionInfo& ci = _collections[ns];
uassert(14822, (string)"state changed in the middle: " + ns, ci.isSharded());
@@ -424,13 +424,13 @@ namespace mongo {
void DBConfig::setPrimary(const std::string& s) {
const auto& shard = grid.shardRegistry()->findIfExists(s);
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
_primaryId = shard->getId();
_save();
}
bool DBConfig::load() {
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
return _load();
}
@@ -500,7 +500,7 @@ namespace mongo {
bool successful = false;
{
- boost::lock_guard<boost::mutex> lk( _lock );
+ stdx::lock_guard<stdx::mutex> lk( _lock );
successful = _reload();
}
@@ -641,7 +641,7 @@ namespace mongo {
void DBConfig::getAllShardIds(set<ShardId>* shardIds) {
dassert(shardIds);
- boost::lock_guard<boost::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
shardIds->insert(getPrimaryId());
for (CollectionInfoMap::const_iterator it(_collections.begin()), end(_collections.end());
it != end;
@@ -653,7 +653,7 @@ namespace mongo {
}
void DBConfig::getAllShardedCollections( set<string>& namespaces ) {
- boost::lock_guard<boost::mutex> lk(_lock);
+ stdx::lock_guard<stdx::mutex> lk(_lock);
for( CollectionInfoMap::const_iterator i = _collections.begin(); i != _collections.end(); i++ ) {
log() << "Coll : " << i->first << " sharded? " << i->second.isSharded() << endl;
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 32d838dcc1e..31f62c1e2b6 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -251,7 +251,7 @@ namespace mongo {
ShardedClientCursorPtr CursorCache::get( long long id ) const {
LOG(_myLogLevel) << "CursorCache::get id: " << id << endl;
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
MapSharded::const_iterator i = _cursors.find( id );
if ( i == _cursors.end() ) {
return ShardedClientCursorPtr();
@@ -262,7 +262,7 @@ namespace mongo {
int CursorCache::getMaxTimeMS( long long id ) const {
verify( id );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
MapShardedInt::const_iterator i = _cursorsMaxTimeMS.find( id );
return ( i != _cursorsMaxTimeMS.end() ) ? i->second : 0;
}
@@ -276,7 +276,7 @@ namespace mongo {
verify( maxTimeMS == kMaxTimeCursorTimeLimitExpired
|| maxTimeMS == kMaxTimeCursorNoTimeLimit
|| maxTimeMS > 0 );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_cursorsMaxTimeMS[cursor->getId()] = maxTimeMS;
_cursors[cursor->getId()] = cursor;
_shardedTotal++;
@@ -287,20 +287,20 @@ namespace mongo {
verify( maxTimeMS == kMaxTimeCursorTimeLimitExpired
|| maxTimeMS == kMaxTimeCursorNoTimeLimit
|| maxTimeMS > 0 );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_cursorsMaxTimeMS[id] = maxTimeMS;
}
void CursorCache::remove( long long id ) {
verify( id );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_cursorsMaxTimeMS.erase( id );
_cursors.erase( id );
}
-
+
void CursorCache::removeRef( long long id ) {
verify( id );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_refs.erase( id );
_refsNS.erase( id );
cursorStatsSingleTarget.decrement();
@@ -309,7 +309,7 @@ namespace mongo {
void CursorCache::storeRef(const std::string& server, long long id, const std::string& ns) {
LOG(_myLogLevel) << "CursorCache::storeRef server: " << server << " id: " << id << endl;
verify( id );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_refs[id] = server;
_refsNS[id] = ns;
cursorStatsSingleTarget.increment();
@@ -317,7 +317,7 @@ namespace mongo {
string CursorCache::getRef( long long id ) const {
verify( id );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
MapNormal::const_iterator i = _refs.find( id );
LOG(_myLogLevel) << "CursorCache::getRef id: " << id << " out: " << ( i == _refs.end() ? " NONE " : i->second ) << endl;
@@ -329,7 +329,7 @@ namespace mongo {
std::string CursorCache::getRefNS(long long id) const {
verify(id);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
MapNormal::const_iterator i = _refsNS.find(id);
LOG(_myLogLevel) << "CursorCache::getRefNs id: " << id
@@ -343,7 +343,7 @@ namespace mongo {
long long CursorCache::genId() {
while ( true ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
long long x = Listener::getElapsedTimeMillis() << 32;
x |= _random.nextInt32();
@@ -396,7 +396,7 @@ namespace mongo {
string server;
{
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
MapSharded::iterator i = _cursors.find( id );
if ( i != _cursors.end() ) {
@@ -447,7 +447,7 @@ namespace mongo {
}
void CursorCache::appendInfo( BSONObjBuilder& result ) const {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
result.append( "sharded", static_cast<int>(cursorStatsMultiTarget.get()));
result.appendNumber( "shardedEver" , _shardedTotal );
result.append( "refs", static_cast<int>(cursorStatsSingleTarget.get()));
@@ -456,7 +456,7 @@ namespace mongo {
void CursorCache::doTimeouts() {
long long now = Listener::getElapsedTimeMillis();
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ) {
// Note: cursors with no timeout will always have an idleTime of 0
long long idleFor = i->second->idleTime( now );
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 927b6283517..8475777b76c 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -95,12 +95,12 @@ namespace mongo {
}
bool ShardingState::enabled() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _enabled;
}
string ShardingState::getConfigServer() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_enabled);
return grid.catalogManager()->connectionString().toString();
@@ -120,12 +120,12 @@ namespace mongo {
}
std::string ShardingState::getShardName() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _shardName;
}
bool ShardingState::setShardNameAndHost( const string& name, const string& host ) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if ( _shardName.size() == 0 ) {
// TODO SERVER-2299 remotely verify the name is sound w.r.t. IPs
_shardName = name;
@@ -173,20 +173,20 @@ namespace mongo {
}
void ShardingState::clearCollectionMetadata() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_collMetadata.clear();
}
// TODO we shouldn't need three ways for checking the version. Fix this.
bool ShardingState::hasVersion( const string& ns ) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
return it != _collMetadata.end();
}
bool ShardingState::hasVersion( const string& ns , ChunkVersion& version ) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
if ( it == _collMetadata.end() )
@@ -198,7 +198,7 @@ namespace mongo {
}
ChunkVersion ShardingState::getVersion(const string& ns) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it != _collMetadata.end() ) {
@@ -215,9 +215,9 @@ namespace mongo {
const BSONObj& min,
const BSONObj& max,
ChunkVersion version) {
-
+
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() ) ;
@@ -247,8 +247,8 @@ namespace mongo {
CollectionMetadataPtr prevMetadata) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
-
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
+
log() << "ShardingState::undoDonateChunk acquired _mutex" << endl;
CollectionMetadataMap::iterator it = _collMetadata.find( ns );
@@ -262,9 +262,9 @@ namespace mongo {
const BSONObj& max,
const OID& epoch,
string* errMsg ) {
-
+
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -307,9 +307,9 @@ namespace mongo {
const BSONObj& max,
const OID& epoch,
string* errMsg ) {
-
+
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -352,9 +352,9 @@ namespace mongo {
const BSONObj& max,
const vector<BSONObj>& splitKeys,
ChunkVersion version ) {
-
+
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() ) ;
@@ -378,7 +378,7 @@ namespace mongo {
ChunkVersion mergedVersion ) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() );
@@ -396,7 +396,7 @@ namespace mongo {
}
void ShardingState::resetMetadata( const string& ns ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
warning() << "resetting metadata for " << ns << ", this should only be used in testing"
<< endl;
@@ -430,7 +430,7 @@ namespace mongo {
CollectionMetadataPtr storedMetadata;
{
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::iterator it = _collMetadata.find( ns );
if ( it != _collMetadata.end() ) storedMetadata = it->second;
}
@@ -478,7 +478,7 @@ namespace mongo {
void ShardingState::_initialize(const string& server) {
// Ensure only one caller at a time initializes
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_enabled) {
// TODO: Do we need to throw exception if the config servers have changed from what we
@@ -526,7 +526,7 @@ namespace mongo {
CollectionMetadataPtr beforeMetadata;
{
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
// We can't reload if sharding is not enabled - i.e. without a config server location
if (!_enabled) {
@@ -648,7 +648,7 @@ namespace mongo {
// Get the metadata now that the load has completed
//
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
// Don't reload if our config server has changed or sharding is no longer enabled
if (!_enabled) {
@@ -803,7 +803,7 @@ namespace mongo {
}
void ShardingState::appendInfo(BSONObjBuilder& builder) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
builder.appendBool("enabled", _enabled);
if (!_enabled) {
@@ -836,7 +836,7 @@ namespace mongo {
}
CollectionMetadataPtr ShardingState::getCollectionMetadata( const string& ns ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -890,7 +890,7 @@ namespace mongo {
static mongo::mutex lock;
static bool done = false;
- boost::lock_guard<boost::mutex> lk(lock);
+ stdx::lock_guard<stdx::mutex> lk(lock);
if (!done) {
log() << "first cluster operation detected, adding sharding hook to enable versioning "
"and authentication to remote servers";
diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp
index 69cacfaeebc..967ce82d308 100644
--- a/src/mongo/s/version_manager.cpp
+++ b/src/mongo/s/version_manager.cpp
@@ -68,7 +68,7 @@ namespace mongo {
struct ConnectionShardStatus {
bool hasAnySequenceSet(DBClientBase* conn) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
@@ -78,7 +78,7 @@ namespace mongo {
const string& ns,
unsigned long long* sequence) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -93,12 +93,12 @@ namespace mongo {
}
void setSequence( DBClientBase * conn , const string& ns , const unsigned long long& s ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_map[conn->getConnectionId()][ns] = s;
}
void reset( DBClientBase * conn ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_map.erase( conn->getConnectionId() );
}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index f7275d8235d..67f2d12d0f0 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -314,7 +314,7 @@ namespace {
class ScopeCache {
public:
void release(const string& poolName, const std::shared_ptr<Scope>& scope) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (scope->hasOutOfMemoryException()) {
// make some room
@@ -340,7 +340,7 @@ namespace {
}
std::shared_ptr<Scope> tryAcquire(OperationContext* txn, const string& poolName) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (Pools::iterator it = _pools.begin(); it != _pools.end(); ++it) {
if (it->poolName == poolName) {
diff --git a/src/mongo/scripting/engine_v8-3.25.cpp b/src/mongo/scripting/engine_v8-3.25.cpp
index 9b2f82fe359..13e3fac2f30 100644
--- a/src/mongo/scripting/engine_v8-3.25.cpp
+++ b/src/mongo/scripting/engine_v8-3.25.cpp
@@ -394,7 +394,7 @@ namespace mongo {
}
void V8ScriptEngine::interrupt(unsigned opId) {
- boost::lock_guard<boost::mutex> intLock(_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -407,7 +407,7 @@ namespace mongo {
}
void V8ScriptEngine::interruptAll() {
- boost::lock_guard<boost::mutex> interruptLock(_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> interruptLock(_globalInterruptLock);
for (OpIdToScopeMap::iterator iScope = _opToScopeMap.begin();
iScope != _opToScopeMap.end(); ++iScope) {
iScope->second->kill();
@@ -415,7 +415,7 @@ namespace mongo {
}
void V8Scope::registerOperation(OperationContext* txn) {
- boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> giLock(_engine->_globalInterruptLock);
invariant(_opId == 0);
_opId = txn->getOpID();
_engine->_opToScopeMap[_opId] = this;
@@ -427,7 +427,7 @@ namespace mongo {
}
void V8Scope::unregisterOperation() {
- boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> giLock(_engine->_globalInterruptLock);
LOG(2) << "V8Scope " << static_cast<const void*>(this) << " unregistered for op "
<< _opId << endl;
if (_opId != 0) {
@@ -441,7 +441,7 @@ namespace mongo {
bool V8Scope::nativePrologue() {
v8::Locker l(_isolate);
- boost::lock_guard<boost::mutex> cbEnterLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> cbEnterLock(_interruptLock);
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: "
<< static_cast<const void*>(_isolate) << endl;
@@ -460,7 +460,7 @@ namespace mongo {
bool V8Scope::nativeEpilogue() {
v8::Locker l(_isolate);
- boost::lock_guard<boost::mutex> cbLeaveLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> cbLeaveLock(_interruptLock);
_inNativeExecution = false;
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: "
@@ -477,7 +477,7 @@ namespace mongo {
}
void V8Scope::kill() {
- boost::lock_guard<boost::mutex> interruptLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> interruptLock(_interruptLock);
if (!_inNativeExecution) {
// Set the TERMINATE flag on the stack guard for this isolate.
// This won't happen between calls to nativePrologue and nativeEpilogue().
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
index 50477bbfb99..b2f913ce70a 100644
--- a/src/mongo/scripting/engine_v8.cpp
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -373,7 +373,7 @@ namespace mongo {
}
void V8ScriptEngine::interrupt(unsigned opId) {
- boost::lock_guard<boost::mutex> intLock(_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -386,7 +386,7 @@ namespace mongo {
}
void V8ScriptEngine::interruptAll() {
- boost::lock_guard<boost::mutex> interruptLock(_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> interruptLock(_globalInterruptLock);
for (OpIdToScopeMap::iterator iScope = _opToScopeMap.begin();
iScope != _opToScopeMap.end(); ++iScope) {
iScope->second->kill();
@@ -394,7 +394,7 @@ namespace mongo {
}
void V8Scope::registerOperation(OperationContext* txn) {
- boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> giLock(_engine->_globalInterruptLock);
invariant(_opId == 0);
_opId = txn->getOpID();
_engine->_opToScopeMap[_opId] = this;
@@ -406,7 +406,7 @@ namespace mongo {
}
void V8Scope::unregisterOperation() {
- boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
+ stdx::lock_guard<stdx::mutex> giLock(_engine->_globalInterruptLock);
LOG(2) << "V8Scope " << static_cast<const void*>(this) << " unregistered for op " << _opId << endl;
if (_opId != 0) {
// scope is currently associated with an operation id
@@ -419,7 +419,7 @@ namespace mongo {
bool V8Scope::nativePrologue() {
v8::Locker l(_isolate);
- boost::lock_guard<boost::mutex> cbEnterLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> cbEnterLock(_interruptLock);
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: " << static_cast<const void*>(_isolate) << endl;
return false;
@@ -436,7 +436,7 @@ namespace mongo {
bool V8Scope::nativeEpilogue() {
v8::Locker l(_isolate);
- boost::lock_guard<boost::mutex> cbLeaveLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> cbLeaveLock(_interruptLock);
_inNativeExecution = false;
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: " << static_cast<const void*>(_isolate) << endl;
@@ -451,7 +451,7 @@ namespace mongo {
}
void V8Scope::kill() {
- boost::lock_guard<boost::mutex> interruptLock(_interruptLock);
+ stdx::lock_guard<stdx::mutex> interruptLock(_interruptLock);
if (!_inNativeExecution) {
// Set the TERMINATE flag on the stack guard for this isolate.
// This won't happen between calls to nativePrologue and nativeEpilogue().
diff --git a/src/mongo/scripting/v8-3.25_utils.cpp b/src/mongo/scripting/v8-3.25_utils.cpp
index a33f4ff723f..f18de5c4bd8 100644
--- a/src/mongo/scripting/v8-3.25_utils.cpp
+++ b/src/mongo/scripting/v8-3.25_utils.cpp
@@ -32,7 +32,6 @@
#include "mongo/scripting/v8-3.25_utils.h"
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>
#include <map>
@@ -42,6 +41,7 @@
#include "mongo/platform/cstdint.h"
#include "mongo/scripting/engine_v8-3.25.h"
#include "mongo/scripting/v8-3.25_db.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -158,15 +158,15 @@ namespace mongo {
BSONObj _args;
BSONObj _returnData;
void setErrored(bool value) {
- boost::lock_guard<boost::mutex> lck(_erroredMutex);
+ stdx::lock_guard<stdx::mutex> lck(_erroredMutex);
_errored = value;
}
bool getErrored() {
- boost::lock_guard<boost::mutex> lck(_erroredMutex);
+ stdx::lock_guard<stdx::mutex> lck(_erroredMutex);
return _errored;
}
private:
- boost::mutex _erroredMutex;
+ stdx::mutex _erroredMutex;
bool _errored;
};
@@ -246,12 +246,12 @@ namespace mongo {
struct Latch {
Latch(int32_t count) : count(count) {}
boost::condition_variable cv;
- boost::mutex mutex;
+ stdx::mutex mutex;
int32_t count;
};
std::shared_ptr<Latch> get(int32_t desc) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
Map::iterator iter = _latches.find(desc);
jsassert(iter != _latches.end(), "not a valid CountDownLatch descriptor");
return iter->second;
@@ -259,27 +259,27 @@ namespace mongo {
typedef std::map< int32_t, std::shared_ptr<Latch> > Map;
Map _latches;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
int32_t _counter;
public:
CountDownLatchHolder() : _counter(0) {}
int32_t make(int32_t count) {
jsassert(count >= 0, "argument must be >= 0");
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
int32_t desc = ++_counter;
_latches.insert(std::make_pair(desc, std::make_shared<Latch>(count)));
return desc;
}
void await(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
while (latch->count != 0) {
latch->cv.wait(lock);
}
}
void countDown(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
if (latch->count > 0) {
latch->count--;
}
@@ -289,7 +289,7 @@ namespace mongo {
}
int32_t getCount(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
return latch->count;
}
};
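
The CountDownLatchHolder above implements the classic mutex-plus-condvar latch: await() sleeps in a loop until the count reaches zero, and countDown() decrements and wakes all waiters on the final decrement. A self-contained sketch of the core, with std types in place of the boost/stdx mix:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class CountDownLatch {
    public:
        explicit CountDownLatch(int32_t count) : _count(count) {}

        void await() {
            std::unique_lock<std::mutex> lock(_mutex);
            while (_count != 0) {   // loop guards against spurious wakeups
                _cv.wait(lock);
            }
        }

        void countDown() {
            std::lock_guard<std::mutex> lock(_mutex);
            if (_count > 0 && --_count == 0) {
                _cv.notify_all();   // release every waiter at once
            }
        }

    private:
        std::mutex _mutex;
        std::condition_variable _cv;
        int32_t _count;
    };
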
diff --git a/src/mongo/scripting/v8_deadline_monitor.h b/src/mongo/scripting/v8_deadline_monitor.h
index fae4ac2d87f..ee1b6a63daa 100644
--- a/src/mongo/scripting/v8_deadline_monitor.h
+++ b/src/mongo/scripting/v8_deadline_monitor.h
@@ -77,7 +77,7 @@ namespace mongo {
~DeadlineMonitor() {
{
// ensure the monitor thread has been stopped before destruction
- boost::lock_guard<boost::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
_inShutdown = true;
_newDeadlineAvailable.notify_one();
}
@@ -93,7 +93,7 @@ namespace mongo {
*/
void startDeadline(_Task* const task, uint64_t timeoutMs) {
const auto deadline = Date_t::now() + Milliseconds(timeoutMs);
- boost::lock_guard<boost::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
_tasks[task] = deadline;
@@ -109,7 +109,7 @@ namespace mongo {
* @return true if the task was found and erased
*/
bool stopDeadline(_Task* const task) {
- boost::lock_guard<boost::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
return _tasks.erase(task);
}
@@ -120,7 +120,7 @@ namespace mongo {
* _Task::kill() is invoked.
*/
void deadlineMonitorThread() {
- boost::unique_lock<boost::mutex> lk(_deadlineMutex);
+ stdx::unique_lock<stdx::mutex> lk(_deadlineMutex);
while (!_inShutdown) {
// get the next interval to wait
@@ -160,7 +160,7 @@ namespace mongo {
typedef unordered_map<_Task*, Date_t> TaskDeadlineMap;
TaskDeadlineMap _tasks; // map of running tasks with deadlines
- boost::mutex _deadlineMutex; // protects all non-const members, except _monitorThread
+ stdx::mutex _deadlineMutex; // protects all non-const members, except _monitorThread
boost::condition_variable _newDeadlineAvailable; // Signaled for timeout, start and stop
boost::thread _monitorThread; // the deadline monitor thread
Date_t _nearestDeadlineWallclock = Date_t::max(); // absolute time of the nearest deadline
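
The DeadlineMonitor above keeps one thread asleep on a condition variable until the nearest task deadline, and startDeadline() notifies it to re-plan whenever a sooner deadline arrives. A sketch of that loop's shape (hypothetical Task type, std types throughout):

    #include <algorithm>
    #include <chrono>
    #include <condition_variable>
    #include <map>
    #include <mutex>

    struct Task {
        void kill() {}  // stand-in for the real task interruption
    };

    class DeadlineMonitor {
        using Clock = std::chrono::steady_clock;

    public:
        void startDeadline(Task* task, std::chrono::milliseconds timeout) {
            std::lock_guard<std::mutex> lk(_mutex);
            _tasks[task] = Clock::now() + timeout;
            _newDeadline.notify_one();  // wake the monitor to re-plan
        }

        void monitorThread() {
            std::unique_lock<std::mutex> lk(_mutex);
            while (!_inShutdown) {
                if (_tasks.empty()) {
                    _newDeadline.wait(lk);  // nothing scheduled yet
                    continue;
                }
                // Sleep until the soonest deadline (or an early notify).
                auto nearest = Clock::time_point::max();
                for (const auto& entry : _tasks)
                    nearest = std::min(nearest, entry.second);
                _newDeadline.wait_until(lk, nearest);

                const auto now = Clock::now();
                for (auto it = _tasks.begin(); it != _tasks.end();) {
                    if (it->second <= now) {
                        it->first->kill();  // deadline expired
                        it = _tasks.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
        }

    private:
        std::mutex _mutex;
        std::condition_variable _newDeadline;
        std::map<Task*, Clock::time_point> _tasks;
        bool _inShutdown = false;
    };
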
diff --git a/src/mongo/scripting/v8_deadline_monitor_test.cpp b/src/mongo/scripting/v8_deadline_monitor_test.cpp
index e0f06fd91ff..a76246d4031 100644
--- a/src/mongo/scripting/v8_deadline_monitor_test.cpp
+++ b/src/mongo/scripting/v8_deadline_monitor_test.cpp
@@ -44,13 +44,13 @@ namespace mongo {
public:
TaskGroup() : _c(), _killCount(0), _targetKillCount(0) { }
void noteKill() {
- boost::lock_guard<boost::mutex> lk(_m);
+ stdx::lock_guard<stdx::mutex> lk(_m);
++_killCount;
if (_killCount >= _targetKillCount)
_c.notify_one();
}
void waitForKillCount(uint64_t target) {
- boost::unique_lock<boost::mutex> lk(_m);
+ stdx::unique_lock<stdx::mutex> lk(_m);
_targetKillCount = target;
while (_killCount < _targetKillCount)
_c.wait(lk);
diff --git a/src/mongo/scripting/v8_utils.cpp b/src/mongo/scripting/v8_utils.cpp
index 13909d87fd9..53420b814b7 100644
--- a/src/mongo/scripting/v8_utils.cpp
+++ b/src/mongo/scripting/v8_utils.cpp
@@ -32,7 +32,6 @@
#include "mongo/scripting/v8_utils.h"
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>
#include <map>
@@ -42,6 +41,7 @@
#include "mongo/platform/cstdint.h"
#include "mongo/scripting/engine_v8.h"
#include "mongo/scripting/v8_db.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -157,15 +157,15 @@ namespace mongo {
BSONObj _args;
BSONObj _returnData;
void setErrored(bool value) {
- boost::lock_guard<boost::mutex> lck(_erroredMutex);
+ stdx::lock_guard<stdx::mutex> lck(_erroredMutex);
_errored = value;
}
bool getErrored() {
- boost::lock_guard<boost::mutex> lck(_erroredMutex);
+ stdx::lock_guard<stdx::mutex> lck(_erroredMutex);
return _errored;
}
private:
- boost::mutex _erroredMutex;
+ stdx::mutex _erroredMutex;
bool _errored;
};
@@ -243,12 +243,12 @@ namespace mongo {
struct Latch {
Latch(int32_t count) : count(count) {}
boost::condition_variable cv;
- boost::mutex mutex;
+ stdx::mutex mutex;
int32_t count;
};
std::shared_ptr<Latch> get(int32_t desc) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
Map::iterator iter = _latches.find(desc);
jsassert(iter != _latches.end(), "not a valid CountDownLatch descriptor");
return iter->second;
@@ -256,27 +256,27 @@ namespace mongo {
typedef std::map< int32_t, std::shared_ptr<Latch> > Map;
Map _latches;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
int32_t _counter;
public:
CountDownLatchHolder() : _counter(0) {}
int32_t make(int32_t count) {
jsassert(count >= 0, "argument must be >= 0");
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
int32_t desc = ++_counter;
_latches.insert(std::make_pair(desc, std::make_shared<Latch>(count)));
return desc;
}
void await(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
while (latch->count != 0) {
latch->cv.wait(lock);
}
}
void countDown(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
if (latch->count > 0) {
latch->count--;
}
@@ -286,7 +286,7 @@ namespace mongo {
}
int32_t getCount(int32_t desc) {
std::shared_ptr<Latch> latch = get(desc);
- boost::unique_lock<boost::mutex> lock(latch->mutex);
+ stdx::unique_lock<stdx::mutex> lock(latch->mutex);
return latch->count;
}
};
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 1ef831adf28..3fb31c2b0ad 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -246,7 +246,7 @@ namespace mongo {
}
void BenchRunState::waitForState(State awaitedState) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
switch ( awaitedState ) {
case BRS_RUNNING:
@@ -274,7 +274,7 @@ namespace mongo {
}
void BenchRunState::assertFinished() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
verify(0 == _numUnstartedWorkers + _numActiveWorkers);
}
@@ -287,7 +287,7 @@ namespace mongo {
}
void BenchRunState::onWorkerStarted() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
verify( _numUnstartedWorkers > 0 );
--_numUnstartedWorkers;
++_numActiveWorkers;
@@ -297,7 +297,7 @@ namespace mongo {
}
void BenchRunState::onWorkerFinished() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
verify( _numActiveWorkers > 0 );
--_numActiveWorkers;
if (_numActiveWorkers + _numUnstartedWorkers == 0) {
@@ -790,7 +790,7 @@ namespace mongo {
_config(config) {
_oid.init();
- boost::lock_guard<boost::mutex> lk(_staticMutex);
+ stdx::lock_guard<stdx::mutex> lk(_staticMutex);
_activeRuns[_oid] = this;
}
@@ -853,7 +853,7 @@ namespace mongo {
}
{
- boost::lock_guard<boost::mutex> lk(_staticMutex);
+ stdx::lock_guard<stdx::mutex> lk(_staticMutex);
_activeRuns.erase( _oid );
}
}
@@ -864,7 +864,7 @@ namespace mongo {
}
BenchRunner* BenchRunner::get( OID oid ) {
- boost::lock_guard<boost::mutex> lk(_staticMutex);
+ stdx::lock_guard<stdx::mutex> lk(_staticMutex);
return _activeRuns[ oid ];
}
@@ -927,7 +927,7 @@ namespace mongo {
return zoo;
}
- boost::mutex BenchRunner::_staticMutex;
+ stdx::mutex BenchRunner::_staticMutex;
map< OID, BenchRunner* > BenchRunner::_activeRuns;
/**
diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h
index 16b2b8ac156..2415b2366b7 100644
--- a/src/mongo/shell/bench.h
+++ b/src/mongo/shell/bench.h
@@ -31,11 +31,11 @@
#include <string>
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/timer.h"
namespace pcrecpp {
@@ -321,7 +321,7 @@ namespace mongo {
void onWorkerFinished();
private:
- boost::mutex _mutex;
+ stdx::mutex _mutex;
boost::condition _stateChangeCondition;
unsigned _numUnstartedWorkers;
unsigned _numActiveWorkers;
@@ -449,7 +449,7 @@ namespace mongo {
private:
// TODO: Same as for createWithConfig.
- static boost::mutex _staticMutex;
+ static stdx::mutex _staticMutex;
static std::map< OID, BenchRunner* > _activeRuns;
OID _oid;
diff --git a/src/mongo/shell/clientAndShell.cpp b/src/mongo/shell/clientAndShell.cpp
index 3a12c54f886..a7ee344a03e 100644
--- a/src/mongo/shell/clientAndShell.cpp
+++ b/src/mongo/shell/clientAndShell.cpp
@@ -53,7 +53,7 @@ namespace mongo {
void dbexit( ExitCode returnCode, const char *whyMsg ) {
{
- boost::lock_guard<boost::mutex> lk( shell_utils::mongoProgramOutputMutex );
+ stdx::lock_guard<stdx::mutex> lk( shell_utils::mongoProgramOutputMutex );
dbexitCalled = true;
}
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index d159224057b..14ec8b3fb2c 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -183,7 +183,7 @@ namespace mongo {
void exitCleanly(ExitCode code) {
{
- boost::lock_guard<boost::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
+ stdx::lock_guard<stdx::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
mongo::dbexitCalled = true;
}
@@ -889,7 +889,7 @@ int _main( int argc, char* argv[], char **envp ) {
}
{
- boost::lock_guard<boost::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
+ stdx::lock_guard<stdx::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
mongo::dbexitCalled = true;
}
return 0;
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index ce951fc92d5..0b5c78376ae 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -296,14 +296,14 @@ namespace mongo {
BSONObj info;
if ( client.runCommand( "admin", BSON( "whatsmyuri" << 1 ), info ) ) {
string connstr = dynamic_cast<DBClientBase&>( client ).getServerAddress();
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_connectionUris[ connstr ].insert( info[ "you" ].str() );
- }
+ }
}
void ConnectionRegistry::killOperationsOnAllConnections( bool withPrompt ) const {
Prompter prompter( "do you want to kill the current op(s) on the server?" );
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
for( map<string,set<string> >::const_iterator i = _connectionUris.begin();
i != _connectionUris.end(); ++i ) {
@@ -371,6 +371,6 @@ namespace mongo {
}
- mongo::mutex &mongoProgramOutputMutex(*(new boost::mutex()));
+ mongo::mutex &mongoProgramOutputMutex(*(new stdx::mutex()));
}
}
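
Binding a reference to a mutex that is new'd and never deleted, as above, is a deliberate leak: the mutex has no destructor call to race with, so shutdown paths can still take it after static destruction begins. A sketch of the idiom:

    #include <mutex>

    // Leaked on purpose: outlives every static destructor.
    std::mutex& outputMutex = *(new std::mutex());

    bool dbexitCalled = false;

    void noteExit() {
        std::lock_guard<std::mutex> lk(outputMutex);  // safe even at exit
        dbexitCalled = true;
    }
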
diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h
index 040caa41653..06a43e0b936 100644
--- a/src/mongo/shell/shell_utils.h
+++ b/src/mongo/shell/shell_utils.h
@@ -32,6 +32,7 @@
#include <boost/thread/mutex.hpp>
#include "mongo/db/jsobj.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -82,7 +83,7 @@ namespace mongo {
// This mutex helps the shell serialize output on exit, to avoid deadlocks at shutdown. So
// it also protects the global dbexitCalled.
- extern boost::mutex &mongoProgramOutputMutex;
+ extern stdx::mutex &mongoProgramOutputMutex;
// Helper to tell if a file exists cross platform
// TODO: Remove this when we have a cross platform file utility library
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 2b8aa0239db..618b08b3de1 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -90,18 +90,18 @@ namespace mongo {
ProgramOutputMultiplexer programOutputLogger;
bool ProgramRegistry::isPortRegistered( int port ) const {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
return _ports.count( port ) == 1;
}
-
+
ProcessId ProgramRegistry::pidForPort( int port ) const {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( isPortRegistered( port ) );
return _ports.find( port )->second.first;
}
-
+
int ProgramRegistry::portForPid(ProcessId pid) const {
- boost::lock_guard<boost::recursive_mutex> lk(_mutex);
+ stdx::lock_guard<boost::recursive_mutex> lk(_mutex);
for (map<int, pair<ProcessId, int> >::const_iterator it = _ports.begin();
it != _ports.end(); ++it)
{
@@ -112,13 +112,13 @@ namespace mongo {
}
void ProgramRegistry::registerPort( int port, ProcessId pid, int output ) {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( !isPortRegistered( port ) );
_ports.insert( make_pair( port, make_pair( pid, output ) ) );
}
void ProgramRegistry::deletePort( int port ) {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
if ( !isPortRegistered( port ) ) {
return;
}
@@ -127,7 +127,7 @@ namespace mongo {
}
void ProgramRegistry::getRegisteredPorts( vector<int> &ports ) {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
for( map<int,pair<ProcessId,int> >::const_iterator i = _ports.begin(); i != _ports.end();
++i ) {
ports.push_back( i->first );
@@ -135,18 +135,18 @@ namespace mongo {
}
bool ProgramRegistry::isPidRegistered( ProcessId pid ) const {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
return _pids.count( pid ) == 1;
}
void ProgramRegistry::registerPid( ProcessId pid, int output ) {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( !isPidRegistered( pid ) );
_pids.insert( make_pair( pid, output ) );
}
void ProgramRegistry::deletePid(ProcessId pid) {
- boost::lock_guard<boost::recursive_mutex> lk(_mutex);
+ stdx::lock_guard<boost::recursive_mutex> lk(_mutex);
if (!isPidRegistered(pid)) {
int port = portForPid(pid);
if (port < 0) return;
@@ -156,23 +156,23 @@ namespace mongo {
close(_pids.find(pid)->second);
_pids.erase(pid);
}
-
+
void ProgramRegistry::getRegisteredPids( vector<ProcessId> &pids ) {
- boost::lock_guard<boost::recursive_mutex> lk( _mutex );
+ stdx::lock_guard<boost::recursive_mutex> lk( _mutex );
for( map<ProcessId,int>::const_iterator i = _pids.begin(); i != _pids.end(); ++i ) {
pids.push_back( i->first );
}
}
-
+
ProgramRegistry &registry = *( new ProgramRegistry() );
void goingAwaySoon() {
- boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
+ stdx::lock_guard<stdx::mutex> lk( mongoProgramOutputMutex );
mongo::dbexitCalled = true;
}
void ProgramOutputMultiplexer::appendLine( int port, ProcessId pid, const char *line ) {
- boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
+ stdx::lock_guard<stdx::mutex> lk( mongoProgramOutputMutex );
if( mongo::dbexitCalled ) throw "program is terminating";
stringstream buf;
if ( port > 0 )
@@ -185,7 +185,7 @@ namespace mongo {
}
string ProgramOutputMultiplexer::str() const {
- boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
+ stdx::lock_guard<stdx::mutex> lk( mongoProgramOutputMutex );
string ret = _buffer.str();
size_t len = ret.length();
if ( len > 100000 ) {
@@ -195,8 +195,8 @@ namespace mongo {
}
void ProgramOutputMultiplexer::clear() {
- boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
- _buffer.str( "" );
+ stdx::lock_guard<stdx::mutex> lk( mongoProgramOutputMutex );
+ _buffer.str( "" );
}
ProgramRunner::ProgramRunner( const BSONObj &args ) {
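A note on the ProgramRegistry hunks above: only the guard template changes, not the mutex. Assuming stdx::lock_guard is a thin alias for std::lock_guard (as the stdx naming suggests), it is parameterized on any type providing lock()/unlock(), so it can still guard the boost::recursive_mutex member. A minimal sketch of the same pairing; Registry, _mutex, and _data are illustrative names, not from this patch:

    #include <map>
    #include <mutex>   // stand-in for mongo/stdx/mutex.h
    #include <boost/thread/recursive_mutex.hpp>

    namespace stdx = std;  // assumption: stdx forwards to std

    class Registry {
    public:
        bool contains(int key) const {
            // std::lock_guard accepts any BasicLockable, including boost types.
            stdx::lock_guard<boost::recursive_mutex> lk(_mutex);
            return _data.count(key) == 1;
        }
        void insert(int key, int value) {
            stdx::lock_guard<boost::recursive_mutex> lk(_mutex);
            _data[key] = value;
        }

    private:
        // Recursive so that a method already holding the lock can call
        // another locking method (cf. deletePid -> portForPid above).
        mutable boost::recursive_mutex _mutex;
        std::map<int, int> _data;
    };
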
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index 1b2f197afcc..4e695dc5e1f 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -43,6 +43,8 @@ namespace {
using mongo::mutex;
using mongo::Notification;
+ namespace stdx = mongo::stdx;
+
// a global variable that can be accessed independent of the IncTester object below
// IncTester keeps it up-to-date
int GLOBAL_val;
@@ -112,7 +114,7 @@ namespace {
virtual void run() {
{
- boost::lock_guard<boost::mutex> lock( _mutex );
+ stdx::lock_guard<stdx::mutex> lock( _mutex );
ASSERT_FALSE( _hasRun );
_hasRun = true;
}
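The test file gets a local namespace alias, namespace stdx = mongo::stdx, rather than a using-directive, so stdx:: names resolve inside the anonymous namespace without importing every stdx symbol. A hypothetical illustration of that scoping (the alias target shown for std::mutex is an assumption about mongo/stdx/mutex.h):

    #include <mutex>

    namespace mongo { namespace stdx { using ::std::mutex; } }  // assumed shape

    namespace {  // the test's anonymous namespace
        namespace stdx = mongo::stdx;  // alias is local to this translation unit
        stdx::mutex testMutex;         // resolves to mongo::stdx::mutex
    }
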
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index fa2aa248930..1d676384311 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -33,8 +33,7 @@
#include "mongo/platform/windows_basic.h"
#endif
-#include <boost/thread/mutex.hpp>
-
+#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/threadlocal.h"
@@ -51,7 +50,7 @@ namespace mongo {
~StaticObserver() { _destroyingStatics = true; }
};
- using mutex = boost::mutex;
+ using mutex = stdx::mutex;
/** The concept with SimpleMutex is that it is a basic lock/unlock with no
special functionality (such as try and try timeout). Thus it can be
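With the hunk above, mongo::mutex becomes a transitional alias chain: mongo::mutex -> stdx::mutex -> (presumably) std::mutex, so existing code spelling mongo::mutex keeps compiling while new code can name stdx::mutex directly. A sketch of the layering, under the assumption that stdx::mutex is a using-declaration for std::mutex:

    #include <mutex>
    #include <type_traits>

    namespace mongo {
        namespace stdx { using ::std::mutex; }  // assumed shape of mongo/stdx/mutex.h
        using mutex = stdx::mutex;              // the alias added in this hunk
    }

    // All three spellings denote one and the same type.
    static_assert(std::is_same<mongo::mutex, std::mutex>::value, "one type");
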
diff --git a/src/mongo/util/concurrency/rwlockimpl.cpp b/src/mongo/util/concurrency/rwlockimpl.cpp
index 22f5a7df54d..659275843b8 100644
--- a/src/mongo/util/concurrency/rwlockimpl.cpp
+++ b/src/mongo/util/concurrency/rwlockimpl.cpp
@@ -37,17 +37,17 @@
#include <map>
#include <set>
#include <boost/version.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
-using namespace std;
-
#include "mongo/config.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/concurrency/rwlockimpl.h"
+#include "mongo/util/concurrency/simplerwlock.h"
+#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/time_support.h"
-#include "rwlockimpl.h"
-#include "simplerwlock.h"
-#include "threadlocal.h"
+
+using namespace std;
namespace mongo {
@@ -56,8 +56,8 @@ namespace mongo {
InitializeSRWLock(&_lock);
}
# if defined(MONGO_CONFIG_DEBUG_BUILD)
- // the code below in a debug build will check that we don't try to recursively lock,
- // which is not supported by this class. also checks that you don't unlock without
+ // In a debug build, the code below checks that we don't try to lock recursively,
+ // which this class does not support. It also checks that you don't unlock without
// having locked
void SimpleRWLock::lock() {
unsigned me = GetCurrentThreadId();
@@ -67,39 +67,39 @@ namespace mongo {
AcquireSRWLockExclusive(&_lock);
tid = me; // this is for use in the debugger to see who does have the lock
}
- void SimpleRWLock::unlock() {
+ void SimpleRWLock::unlock() {
int& state = s.getRef();
dassert( state == -1 );
state++;
tid = 0xffffffff;
ReleaseSRWLockExclusive(&_lock);
}
- void SimpleRWLock::lock_shared() {
+ void SimpleRWLock::lock_shared() {
int& state = s.getRef();
dassert( state == 0 );
state++;
AcquireSRWLockShared(&_lock);
shares.fetchAndAdd(1);
}
- void SimpleRWLock::unlock_shared() {
+ void SimpleRWLock::unlock_shared() {
int& state = s.getRef();
dassert( state == 1 );
state--;
shares.fetchAndSubtract(1);
- ReleaseSRWLockShared(&_lock);
+ ReleaseSRWLockShared(&_lock);
}
# else
void SimpleRWLock::lock() {
AcquireSRWLockExclusive(&_lock);
}
- void SimpleRWLock::unlock() {
+ void SimpleRWLock::unlock() {
ReleaseSRWLockExclusive(&_lock);
}
- void SimpleRWLock::lock_shared() {
+ void SimpleRWLock::lock_shared() {
AcquireSRWLockShared(&_lock);
}
- void SimpleRWLock::unlock_shared() {
- ReleaseSRWLockShared(&_lock);
+ void SimpleRWLock::unlock_shared() {
+ ReleaseSRWLockShared(&_lock);
}
# endif
#else
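The debug-build branch above pairs the Windows SRWLOCK with a per-thread state counter: taking the exclusive lock drives it to -1, the shared lock to +1, and the dasserts catch a recursive acquire or an unlock without a matching lock, neither of which SRWLOCK detects on its own. A minimal sketch of the same checking idea, using C++11 thread_local and a C++14 std::shared_timed_mutex as stand-ins for the ThreadLocalValue helper and the raw SRWLOCK (CheckedRWLock is an invented name):

    #include <cassert>
    #include <shared_mutex>  // std::shared_timed_mutex, C++14

    class CheckedRWLock {
    public:
        void lock() {
            assert(state_ == 0 && "recursive locking is not supported");
            state_ = -1;           // mirror: state-- before the exclusive acquire
            m_.lock();
        }
        void unlock() {
            assert(state_ == -1);  // must hold the exclusive lock
            state_ = 0;
            m_.unlock();
        }
        void lock_shared() {
            assert(state_ == 0);
            state_ = 1;            // mirror: state++ before the shared acquire
            m_.lock_shared();
        }
        void unlock_shared() {
            assert(state_ == 1);   // must hold the shared lock
            state_ = 0;
            m_.unlock_shared();
        }

    private:
        std::shared_timed_mutex m_;
        static thread_local int state_;  // -1 exclusive, +1 shared, 0 unlocked
    };
    thread_local int CheckedRWLock::state_ = 0;
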
diff --git a/src/mongo/util/concurrency/synchronization.cpp b/src/mongo/util/concurrency/synchronization.cpp
index d7cf3575c32..c3b90019a1b 100644
--- a/src/mongo/util/concurrency/synchronization.cpp
+++ b/src/mongo/util/concurrency/synchronization.cpp
@@ -65,14 +65,14 @@ namespace {
}
void Notification::waitToBeNotified() {
- boost::unique_lock<boost::mutex> lock( _mutex );
+ stdx::unique_lock<stdx::mutex> lock( _mutex );
while ( lookFor != cur )
_condition.wait(lock);
lookFor++;
}
void Notification::notifyOne() {
- boost::lock_guard<boost::mutex> lock( _mutex );
+ stdx::lock_guard<stdx::mutex> lock( _mutex );
verify( cur != lookFor );
cur++;
_condition.notify_one();
@@ -86,21 +86,21 @@ namespace {
_nWaiting = 0;
}
- NotifyAll::When NotifyAll::now() {
- boost::lock_guard<boost::mutex> lock( _mutex );
+ NotifyAll::When NotifyAll::now() {
+ stdx::lock_guard<stdx::mutex> lock( _mutex );
return ++_lastReturned;
}
void NotifyAll::waitFor(When e) {
- boost::unique_lock<boost::mutex> lock( _mutex );
+ stdx::unique_lock<stdx::mutex> lock( _mutex );
++_nWaiting;
while( _lastDone < e ) {
_condition.wait(lock);
}
}
- void NotifyAll::awaitBeyondNow() {
- boost::unique_lock<boost::mutex> lock( _mutex );
+ void NotifyAll::awaitBeyondNow() {
+ stdx::unique_lock<stdx::mutex> lock( _mutex );
++_nWaiting;
When e = ++_lastReturned;
while( _lastDone <= e ) {
@@ -109,7 +109,7 @@ namespace {
}
void NotifyAll::notifyAll(When e) {
- boost::unique_lock<boost::mutex> lock( _mutex );
+ stdx::unique_lock<stdx::mutex> lock( _mutex );
_lastDone = e;
_nWaiting = 0;
_condition.notify_all();
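Note the split in this file: notifyOne() takes a stdx::lock_guard, but every method that calls _condition.wait() takes a stdx::unique_lock, because wait() must release and reacquire the mutex and so needs the unlockable lock type; a lock_guard cannot be waited on. The canonical shape, assuming std-style semantics behind the stdx aliases (Event is an invented name):

    #include <condition_variable>
    #include <mutex>

    class Event {
    public:
        void wait() {
            std::unique_lock<std::mutex> lk(m_);
            while (!signaled_)   // re-check: guards against spurious wakeups
                cv_.wait(lk);    // atomically unlocks m_, sleeps, relocks
        }
        void signal() {
            std::lock_guard<std::mutex> lk(m_);  // no wait() here: lock_guard suffices
            signaled_ = true;
            cv_.notify_one();
        }

    private:
        std::mutex m_;
        std::condition_variable cv_;
        bool signaled_ = false;
    };
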
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 971e45429bf..8b2d3c0b1d5 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -110,7 +110,7 @@ namespace mongo {
}
void ThreadPool::startThreads() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
for (int i = 0; i < _nThreads; ++i) {
const std::string threadName(_threadNamePrefix.empty() ?
_threadNamePrefix :
@@ -138,14 +138,14 @@ namespace mongo {
}
void ThreadPool::join() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while(_tasksRemaining) {
_condition.wait(lock);
}
}
void ThreadPool::schedule(Task task) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_tasksRemaining++;
@@ -160,7 +160,7 @@ namespace mongo {
// should only be called by a worker from the worker thread
void ThreadPool::task_done(Worker* worker) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (!_tasks.empty()) {
worker->set_task(_tasks.front());
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
index d4e31b8219b..33efc298d1c 100644
--- a/src/mongo/util/concurrency/thread_pool.h
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -31,10 +31,10 @@
#include <string>
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
#include "mongo/base/disallow_copying.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ namespace mongo {
int tasks_remaining() { return _tasksRemaining; }
private:
- boost::mutex _mutex;
+ stdx::mutex _mutex;
boost::condition _condition;
std::list<Worker*> _freeWorkers; //used as LIFO stack (always front)
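In thread_pool.h the mutex member flips to stdx::mutex while _condition stays a boost::condition. That mix still compiles because boost::condition is a typedef for boost::condition_variable_any, whose wait() is templated over the lock type, so it accepts a stdx::unique_lock<stdx::mutex> just as well as a boost lock. Illustration under that assumption:

    #include <mutex>
    #include <boost/thread/condition.hpp>

    std::mutex m;         // std-style mutex, as stdx::mutex presumably is
    boost::condition cv;  // the _any variant: lock type is a template parameter
    bool flag = false;

    void waitForFlag() {
        std::unique_lock<std::mutex> lk(m);
        while (!flag)
            cv.wait(lk);  // accepted: wait() works with any lock()/unlock() type
    }
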
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index 0d2a8897123..dac1813c09f 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -95,7 +95,7 @@ namespace {
* 3. Sets the new mode.
*/
- boost::lock_guard<boost::mutex> scoped(_modMutex);
+ stdx::lock_guard<stdx::mutex> scoped(_modMutex);
// Step 1
disableFailPoint();
@@ -188,7 +188,7 @@ namespace {
BSONObj FailPoint::toBSON() const {
BSONObjBuilder builder;
- boost::lock_guard<boost::mutex> scoped(_modMutex);
+ stdx::lock_guard<stdx::mutex> scoped(_modMutex);
builder.append("mode", _mode);
builder.append("data", _data);
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index 6ca1df82e14..a23ee1daced 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -28,11 +28,10 @@
#pragma once
-#include <boost/thread/mutex.hpp>
-
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
/**
@@ -158,7 +157,7 @@ namespace mongo {
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable boost::mutex _modMutex;
+ mutable stdx::mutex _modMutex;
/**
* Enables this fail point.
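_modMutex keeps its mutable qualifier across the swap, and that is what lets the const toBSON() above take the lock: locking mutates the mutex object, so only a member declared mutable can be locked from a const member function. Minimal illustration (Counter is an invented name):

    #include <mutex>

    class Counter {
    public:
        void increment() {
            std::lock_guard<std::mutex> lk(m_);
            ++n_;
        }
        int get() const {
            // Legal only because m_ is mutable: locking is logically
            // const but physically modifies the mutex.
            std::lock_guard<std::mutex> lk(m_);
            return n_;
        }

    private:
        mutable std::mutex m_;
        int n_ = 0;
    };
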
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 9aa3e2d1889..4fd44643cd9 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -42,6 +42,7 @@
#include "mongo/util/time_support.h"
using mongo::FailPoint;
+namespace stdx = mongo::stdx;
namespace mongo_test {
TEST(FailPoint, InitialState) {
@@ -176,7 +177,7 @@ namespace mongo_test {
void stopTest() {
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
}
for (auto& t : _tasks) {
@@ -200,7 +201,7 @@ namespace mongo_test {
}
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
break;
}
@@ -224,7 +225,7 @@ namespace mongo_test {
catch (const std::logic_error&) {
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
break;
}
@@ -233,7 +234,7 @@ namespace mongo_test {
void simpleTask() {
while (true) {
static_cast<void>(MONGO_FAIL_POINT(_fp));
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
break;
}
@@ -248,7 +249,7 @@ namespace mongo_test {
_fp.setMode(FailPoint::alwaysOn, 0, BSON("a" << 44));
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
break;
}
@@ -256,7 +257,7 @@ namespace mongo_test {
FailPoint _fp;
std::vector<boost::thread> _tasks;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
bool _inShutdown = false;
};
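stopTest() above wraps its lock_guard in a bare block so the mutex is released before the join loop runs; joining while still holding _mutex could deadlock against worker tasks that take the same lock on every iteration to check _inShutdown. The shape of that idiom, with invented names:

    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex m;
    bool shutdown = false;
    std::vector<std::thread> workers;

    void stopAll() {
        {
            std::lock_guard<std::mutex> lk(m);  // scope ends before join()
            shutdown = true;
        }
        for (auto& t : workers)
            t.join();  // workers can still lock m to observe the flag
    }
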
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index 1a7f0773588..249fe6f878e 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -247,7 +247,7 @@ namespace mongo {
{
// Wake up any threads blocked in waitUntilListening()
- boost::lock_guard<boost::mutex> lock(_readyMutex);
+ stdx::lock_guard<stdx::mutex> lock(_readyMutex);
_ready = true;
_readyCondition.notify_all();
}
@@ -422,7 +422,7 @@ namespace mongo {
{
// Wake up any threads blocked in waitUntilListening()
- boost::lock_guard<boost::mutex> lock(_readyMutex);
+ stdx::lock_guard<stdx::mutex> lock(_readyMutex);
_ready = true;
_readyCondition.notify_all();
}
@@ -571,7 +571,7 @@ namespace mongo {
}
void Listener::waitUntilListening() const {
- boost::unique_lock<boost::mutex> lock(_readyMutex);
+ stdx::unique_lock<stdx::mutex> lock(_readyMutex);
while (!_ready) {
_readyCondition.wait(lock);
}
@@ -642,7 +642,7 @@ namespace mongo {
std::set<std::string>* paths;
{
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
sockets = _sockets;
_sockets = new std::set<int>();
paths = _socketPaths;
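In ListeningSockets::closeAll() above, the lock is held only long enough to swap the live containers out for fresh empty ones; the potentially slow close() calls then presumably run on the detached copies outside the critical section. A sketch of that swap-then-work pattern (names invented):

    #include <mutex>
    #include <set>
    #include <unistd.h>  // close()

    std::mutex m;
    std::set<int>* liveSockets = new std::set<int>();

    void closeAllSockets() {
        std::set<int>* detached;
        {
            std::lock_guard<std::mutex> lk(m);
            detached = liveSockets;             // steal the current set
            liveSockets = new std::set<int>();  // registry stays usable
        }
        for (int fd : *detached)  // slow I/O happens with the lock released
            close(fd);
        delete detached;
    }
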
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
index 253b936959d..9d55c4da854 100644
--- a/src/mongo/util/net/listen.h
+++ b/src/mongo/util/net/listen.h
@@ -29,7 +29,6 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <set>
#include <string>
@@ -37,6 +36,7 @@
#include "mongo/config.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/net/sock.h"
@@ -53,7 +53,7 @@ namespace mongo {
Listener(const std::string& name, const std::string &ip, int port, bool logConnect=true );
virtual ~Listener();
-
+
void initAndListen(); // never returns unless error (start a thread)
/* spawn a thread, etc., then return */
@@ -64,13 +64,13 @@ namespace mongo {
/**
* @return a rough estimate of elapsed time since the server started
- todo:
- 1) consider adding some sort of relaxedLoad semantic to the reading here of
+ todo:
+ 1) consider adding some sort of relaxedLoad semantic to the reading here of
_elapsedTime
2) curTimeMillis() implementations have gotten faster. consider eliminating
- this code? would have to measure it first. if eliminated be careful if
- syscall used isn't skewable. Note also if #2 is done, listen() doesn't
- then have to keep waking up and maybe that helps on a developer's laptop
+ this code? Would have to measure it first. If eliminated, be careful that the
+ syscall used isn't skewable. Note also that if #2 is done, listen() doesn't
+ have to keep waking up, and maybe that helps a developer's laptop's
battery usage...
*/
long long getMyElapsedTimeMillis() const { return _elapsedTime; }
@@ -113,19 +113,19 @@ namespace mongo {
bool _setupSocketsSuccessful;
bool _logConnect;
long long _elapsedTime;
- mutable boost::mutex _readyMutex; // Protects _ready
+ mutable stdx::mutex _readyMutex; // Protects _ready
mutable boost::condition_variable _readyCondition; // Used to wait for changes to _ready
// Boolean that indicates whether this Listener is ready to accept incoming network requests
bool _ready;
-
+
#ifdef MONGO_CONFIG_SSL
SSLManagerInterface* _ssl;
#endif
-
+
void _logListen( int port , bool ssl );
static const Listener* _timeTracker;
-
+
virtual bool useUnixSockets() const { return false; }
public:
@@ -146,21 +146,21 @@ namespace mongo {
, _socketPaths( new std::set<std::string>() )
{ }
void add( int sock ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_sockets->insert( sock );
}
void addPath( const std::string& path ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_socketPaths->insert( path );
}
void remove( int sock ) {
- boost::lock_guard<boost::mutex> lk( _mutex );
+ stdx::lock_guard<stdx::mutex> lk( _mutex );
_sockets->erase( sock );
}
void closeAll();
static ListeningSockets* get();
private:
- boost::mutex _mutex;
+ stdx::mutex _mutex;
std::set<int>* _sockets;
std::set<std::string>* _socketPaths; // for unix domain sockets
static ListeningSockets* _instance;
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index be05fbd1aff..f35a4883d0d 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -117,7 +117,7 @@ namespace mongo {
public:
Ports() : ports() {}
void closeAll(unsigned skip_mask) {
- boost::lock_guard<boost::mutex> bl(m);
+ stdx::lock_guard<stdx::mutex> bl(m);
for ( std::set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
if( (*i)->tag & skip_mask )
continue;
@@ -125,11 +125,11 @@ namespace mongo {
}
}
void insert(MessagingPort* p) {
- boost::lock_guard<boost::mutex> bl(m);
+ stdx::lock_guard<stdx::mutex> bl(m);
ports.insert(p);
}
void erase(MessagingPort* p) {
- boost::lock_guard<boost::mutex> bl(m);
+ stdx::lock_guard<stdx::mutex> bl(m);
ports.erase(p);
}
};
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index e0d69350203..534cadd8521 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -109,7 +109,7 @@ namespace {
// If in the future, we decide to be more strict about posix signal safety, we could switch to
// an atomic test-and-set loop, possibly with a mechanism for detecting signals raised while
// handling other signals.
- boost::mutex streamMutex;
+ stdx::mutex streamMutex;
// must hold streamMutex to call
void writeMallocFreeStreamToLog() {
@@ -131,7 +131,7 @@ namespace {
// this will be called in certain c++ error cases, for example if there are two active
// exceptions
void myTerminate() {
- boost::lock_guard<boost::mutex> lk(streamMutex);
+ stdx::lock_guard<stdx::mutex> lk(streamMutex);
// In c++11 we can recover the current exception to print it.
if (std::exception_ptr eptr = std::current_exception()) {
@@ -191,7 +191,7 @@ namespace {
}
void abruptQuit(int signalNum) {
- boost::lock_guard<boost::mutex> lk(streamMutex);
+ stdx::lock_guard<stdx::mutex> lk(streamMutex);
printSignalAndBacktrace(signalNum);
// Don't go through normal shutdown procedure. It may make things worse.
@@ -230,7 +230,7 @@ namespace {
#else
void abruptQuitWithAddrSignal( int signalNum, siginfo_t *siginfo, void * ) {
- boost::lock_guard<boost::mutex> lk(streamMutex);
+ stdx::lock_guard<stdx::mutex> lk(streamMutex);
const char* action = (signalNum == SIGSEGV || signalNum == SIGBUS) ? "access" : "operation";
mallocFreeOStream << "Invalid " << action << " at address: " << siginfo->si_addr;
@@ -284,7 +284,7 @@ namespace {
}
void reportOutOfMemoryErrorAndExit() {
- boost::lock_guard<boost::mutex> lk(streamMutex);
+ stdx::lock_guard<stdx::mutex> lk(streamMutex);
printStackTrace(mallocFreeOStream << "out of memory.\n");
writeMallocFreeStreamToLog();
quickExit(EXIT_ABRUPT);
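The comment above streamMutex leaves the door open to an atomic test-and-set loop for stricter POSIX signal safety, since pthread mutex operations are not on the async-signal-safe list. A sketch of what that alternative might look like; this is purely illustrative and not what the patch does:

    #include <atomic>

    std::atomic_flag streamLock = ATOMIC_FLAG_INIT;

    void lockStream() {
        // atomic_flag is guaranteed lock-free, so spinning on it inside a
        // signal handler avoids the non-signal-safe mutex syscalls.
        while (streamLock.test_and_set(std::memory_order_acquire)) {
        }
    }

    void unlockStream() {
        streamLock.clear(std::memory_order_release);
    }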