Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/clientAndShell.cpp | 4
-rw-r--r--  src/mongo/client/clientOnly-private.h | 6
-rw-r--r--  src/mongo/client/connpool.cpp | 21
-rw-r--r--  src/mongo/client/dbclient.cpp | 4
-rw-r--r--  src/mongo/client/dbclientinterface.h | 4
-rw-r--r--  src/mongo/client/replica_set_monitor.cpp | 29
-rw-r--r--  src/mongo/client/scoped_db_conn_test.cpp | 10
-rw-r--r--  src/mongo/client/syncclusterconnection.cpp | 22
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 17
-rw-r--r--  src/mongo/db/global_optime.cpp | 8
-rw-r--r--  src/mongo/db/instance.cpp | 11
-rw-r--r--  src/mongo/db/range_deleter.cpp | 41
-rw-r--r--  src/mongo/db/range_deleter_mock_env.cpp | 34
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 10
-rw-r--r--  src/mongo/db/stats/snapshots.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journal.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 7
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp | 14
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h | 2
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 11
-rw-r--r--  src/mongo/dbtests/framework.cpp | 8
-rw-r--r--  src/mongo/dbtests/mock/mock_conn_registry.cpp | 13
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 11
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 7
-rw-r--r--  src/mongo/logger/ramlog.cpp | 10
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 4
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 10
-rw-r--r--  src/mongo/s/config.cpp | 35
-rw-r--r--  src/mongo/s/config_server_checker_service.cpp | 6
-rw-r--r--  src/mongo/s/cursors.cpp | 31
-rw-r--r--  src/mongo/s/d_migrate.cpp | 75
-rw-r--r--  src/mongo/s/d_state.cpp | 42
-rw-r--r--  src/mongo/s/d_state.h | 2
-rw-r--r--  src/mongo/s/distlock.cpp | 27
-rw-r--r--  src/mongo/s/distlock.h | 5
-rw-r--r--  src/mongo/s/grid.cpp | 18
-rw-r--r--  src/mongo/s/grid.h | 1
-rw-r--r--  src/mongo/s/shard.cpp | 27
-rw-r--r--  src/mongo/s/version_manager.cpp | 12
-rw-r--r--  src/mongo/scripting/engine.cpp | 6
-rw-r--r--  src/mongo/scripting/engine_v8-3.25.cpp | 14
-rw-r--r--  src/mongo/scripting/engine_v8.cpp | 16
-rw-r--r--  src/mongo/scripting/v8_deadline_monitor.h | 11
-rw-r--r--  src/mongo/scripting/v8_deadline_monitor_test.cpp | 8
-rw-r--r--  src/mongo/shell/dbshell.cpp | 4
-rw-r--r--  src/mongo/shell/shell_utils.cpp | 10
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 8
-rw-r--r--  src/mongo/util/background.cpp | 40
-rw-r--r--  src/mongo/util/background_job_test.cpp | 6
-rw-r--r--  src/mongo/util/concurrency/mutex.h | 40
-rw-r--r--  src/mongo/util/concurrency/synchronization.cpp | 22
-rw-r--r--  src/mongo/util/concurrency/thread_pool.cpp | 14
-rw-r--r--  src/mongo/util/concurrency/ticketholder.cpp | 19
-rw-r--r--  src/mongo/util/fail_point.cpp | 11
-rw-r--r--  src/mongo/util/fail_point.h | 5
-rw-r--r--  src/mongo/util/file_allocator.cpp | 24
-rw-r--r--  src/mongo/util/mmap_win.cpp | 12
-rw-r--r--  src/mongo/util/net/listen.cpp | 2
-rw-r--r--  src/mongo/util/net/listen.h | 11
-rw-r--r--  src/mongo/util/net/message_port.cpp | 8
-rw-r--r--  src/mongo/util/queue.h | 31
63 files changed, 407 insertions(+), 528 deletions(-)
diff --git a/src/mongo/client/clientAndShell.cpp b/src/mongo/client/clientAndShell.cpp
index 259ff72468a..e791d18d4b1 100644
--- a/src/mongo/client/clientAndShell.cpp
+++ b/src/mongo/client/clientAndShell.cpp
@@ -55,12 +55,12 @@ namespace mongo {
// to avoid deadlocks at shutdown. So it also protects
// the global dbexitCalled.
namespace shell_utils {
- mongo::mutex &mongoProgramOutputMutex(*(new mongo::mutex("mongoProgramOutputMutex")));
+ mongo::mutex &mongoProgramOutputMutex(*(new boost::mutex()));
}
void dbexit( ExitCode returnCode, const char *whyMsg ) {
{
- mongo::mutex::scoped_lock lk( shell_utils::mongoProgramOutputMutex );
+ boost::lock_guard<boost::mutex> lk( shell_utils::mongoProgramOutputMutex );
dbexitCalled = true;
}
log() << "dbexit called" << endl;
diff --git a/src/mongo/client/clientOnly-private.h b/src/mongo/client/clientOnly-private.h
index 6d4549218f4..763c82bb857 100644
--- a/src/mongo/client/clientOnly-private.h
+++ b/src/mongo/client/clientOnly-private.h
@@ -27,12 +27,10 @@
#pragma once
-#include "mongo/client/export_macros.h"
+#include <boost/thread/mutex.hpp>
namespace mongo {
- class mutex;
-
namespace shell_utils {
- extern MONGO_CLIENT_API mongo::mutex &mongoProgramOutputMutex;
+ extern boost::mutex &mongoProgramOutputMutex;
}
}
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 5cd702beaff..4b3002e1322 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -180,8 +180,7 @@ namespace mongo {
const int PoolForHost::kPoolSizeUnlimited(-1);
DBConnectionPool::DBConnectionPool()
- : _mutex("DBConnectionPool") ,
- _name( "dbconnectionpool" ) ,
+ : _name( "dbconnectionpool" ) ,
_maxPoolSize(PoolForHost::kPoolSizeUnlimited) ,
_hooks( new list<DBConnectionHook*>() ) {
}
@@ -189,7 +188,7 @@ namespace mongo {
DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
uassert(17382, "Can't use connection pool during shutdown",
!inShutdown());
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(ident);
@@ -198,7 +197,7 @@ namespace mongo {
DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
{
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(host);
@@ -273,7 +272,7 @@ namespace mongo {
void DBConnectionPool::release(const string& host, DBClientBase *c) {
onRelease(c);
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
_pools[PoolKey(host,c->getSoTimeout())].done(this,c);
}
@@ -283,7 +282,7 @@ namespace mongo {
}
void DBConnectionPool::flush() {
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++ ) {
PoolForHost& p = i->second;
p.flush();
@@ -291,7 +290,7 @@ namespace mongo {
}
void DBConnectionPool::clear() {
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
LOG(2) << "Removing connections on all pools owned by " << _name << endl;
for (PoolMap::iterator iter = _pools.begin(); iter != _pools.end(); ++iter) {
iter->second.clear();
@@ -299,7 +298,7 @@ namespace mongo {
}
void DBConnectionPool::removeHost( const string& host ) {
- scoped_lock L(_mutex);
+ boost::lock_guard<boost::mutex> L(_mutex);
LOG(2) << "Removing connections from all pools for host: " << host << endl;
for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i ) {
const string& poolHost = i->first.ident;
@@ -351,7 +350,7 @@ namespace mongo {
BSONObjBuilder bb( b.subobjStart( "hosts" ) );
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
if ( i->second.numCreated() == 0 )
continue;
@@ -448,7 +447,7 @@ namespace mongo {
}
{
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
PoolForHost& pool = _pools[PoolKey(hostName, conn->getSoTimeout())];
if (pool.isBadSocketCreationTime(conn->getSockCreationMicroSec())) {
return false;
@@ -464,7 +463,7 @@ namespace mongo {
{
// we need to get the connections inside the lock
// but we can actually delete them outside
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
i->second.getStaleConnections( toDelete );
}
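The comment in the last hunk ("we need to get the connections inside the lock but we can actually delete them outside") names a pattern worth spelling out: stale entries are moved out of the shared container while the mutex is held, then destroyed after it is released, so slow destructors never serialize other threads. A sketch under hypothetical names:

    #include <boost/thread/mutex.hpp>
    #include <boost/thread/locks.hpp>
    #include <vector>

    struct Conn { ~Conn() { /* may tear down a socket: slow */ } };

    boost::mutex poolMutex;        // hypothetical pool lock
    std::vector<Conn*> stale;      // hypothetical pool state

    void flushStale() {
        std::vector<Conn*> toDelete;
        {
            boost::lock_guard<boost::mutex> lk(poolMutex);
            toDelete.swap(stale);      // move the doomed entries out under the lock
        }
        for (size_t i = 0; i < toDelete.size(); ++i)
            delete toDelete[i];        // destructors run with the lock released
    }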
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index be3d999a092..1018827dc8e 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -112,7 +112,7 @@ namespace mongo {
_string = ss.str();
}
- mutex ConnectionString::_connectHookMutex( "ConnectionString::_connectHook" );
+ mutex ConnectionString::_connectHookMutex;
ConnectionString::ConnectionHook* ConnectionString::_connectHook = NULL;
DBClientBase* ConnectionString::connect( string& errmsg, double socketTimeout ) const {
@@ -154,7 +154,7 @@ namespace mongo {
case CUSTOM: {
// Lock in case other things are modifying this at the same time
- scoped_lock lk( _connectHookMutex );
+ boost::lock_guard<boost::mutex> lk( _connectHookMutex );
// Allow the replacement of connections with other connections - useful for testing.
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index 4f63c21db92..2c2e9b86b3f 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -313,12 +313,12 @@ namespace mongo {
};
static void setConnectionHook( ConnectionHook* hook ){
- scoped_lock lk( _connectHookMutex );
+ boost::lock_guard<boost::mutex> lk( _connectHookMutex );
_connectHook = hook;
}
static ConnectionHook* getConnectionHook() {
- scoped_lock lk( _connectHookMutex );
+ boost::lock_guard<boost::mutex> lk( _connectHookMutex );
return _connectHook;
}
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 57de0e616d8..f3d25a97cb6 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -99,7 +99,7 @@ namespace {
* SetState::mutex without holding setsLock, but then you can't grab setsLock until you
* release the SetState::mutex.
*/
- mongo::mutex setsLock("ReplicaSetMonitor");
+ mongo::mutex setsLock;
StringMap<set<HostAndPort> > seedServers;
StringMap<ReplicaSetMonitorPtr> sets;
@@ -107,7 +107,6 @@ namespace {
class ReplicaSetMonitorWatcher : public BackgroundJob {
public:
ReplicaSetMonitorWatcher():
- _monitorMutex("ReplicaSetMonitorWatcher::_safego"),
_started(false),
_stopRequested(false) {
}
@@ -126,7 +125,7 @@ namespace {
virtual string name() const { return "ReplicaSetMonitorWatcher"; }
void safeGo() {
- scoped_lock lk( _monitorMutex );
+ boost::lock_guard<boost::mutex> lk( _monitorMutex );
if ( _started )
return;
@@ -140,7 +139,7 @@ namespace {
* Stops monitoring the sets and wait for the monitoring thread to terminate.
*/
void stop() {
- scoped_lock sl( _monitorMutex );
+ boost::lock_guard<boost::mutex> sl( _monitorMutex );
_stopRequested = true;
_stopRequestedCV.notify_one();
}
@@ -154,14 +153,14 @@ namespace {
// Should not be needed after SERVER-7533 gets implemented and tests start
// using it.
if (!inShutdown() && !StaticObserver::_destroyingStatics) {
- scoped_lock sl( _monitorMutex );
- _stopRequestedCV.timed_wait(sl.boost(), boost::posix_time::seconds(10));
+ boost::unique_lock<boost::mutex> sl( _monitorMutex );
+ _stopRequestedCV.timed_wait(sl, boost::posix_time::seconds(10));
}
while ( !inShutdown() &&
!StaticObserver::_destroyingStatics ) {
{
- scoped_lock sl( _monitorMutex );
+ boost::lock_guard<boost::mutex> sl( _monitorMutex );
if (_stopRequested) {
break;
}
@@ -177,12 +176,12 @@ namespace {
error() << "unknown error";
}
- scoped_lock sl( _monitorMutex );
+ boost::unique_lock<boost::mutex> sl( _monitorMutex );
if (_stopRequested) {
break;
}
- _stopRequestedCV.timed_wait(sl.boost(), boost::posix_time::seconds(10));
+ _stopRequestedCV.timed_wait(sl, boost::posix_time::seconds(10));
}
}
@@ -190,7 +189,7 @@ namespace {
// make a copy so we can quickly unlock setsLock
StringMap<ReplicaSetMonitorPtr> setsCopy;
{
- scoped_lock lk( setsLock );
+ boost::lock_guard<boost::mutex> lk( setsLock );
setsCopy = sets;
}
@@ -362,7 +361,7 @@ namespace {
void ReplicaSetMonitor::createIfNeeded(const string& name, const set<HostAndPort>& servers) {
LOG(3) << "ReplicaSetMonitor::createIfNeeded " << name;
- scoped_lock lk(setsLock);
+ boost::lock_guard<boost::mutex> lk(setsLock);
ReplicaSetMonitorPtr& m = sets[name];
if ( ! m )
m = boost::make_shared<ReplicaSetMonitor>( name , servers );
@@ -373,7 +372,7 @@ namespace {
ReplicaSetMonitorPtr ReplicaSetMonitor::get(const std::string& name,
const bool createFromSeed) {
LOG(3) << "ReplicaSetMonitor::get " << name;
- scoped_lock lk( setsLock );
+ boost::lock_guard<boost::mutex> lk( setsLock );
StringMap<ReplicaSetMonitorPtr>::const_iterator i = sets.find( name );
if ( i != sets.end() ) {
return i->second;
@@ -394,7 +393,7 @@ namespace {
set<string> ReplicaSetMonitor::getAllTrackedSets() {
set<string> activeSets;
- scoped_lock lk( setsLock );
+ boost::lock_guard<boost::mutex> lk( setsLock );
for (StringMap<ReplicaSetMonitorPtr>::const_iterator it = sets.begin();
it != sets.end(); ++it)
{
@@ -407,7 +406,7 @@ namespace {
LOG(2) << "Removing ReplicaSetMonitor for " << name << " from replica set table"
<< (clearSeedCache ? " and the seed cache" : "");
- scoped_lock lk( setsLock );
+ boost::lock_guard<boost::mutex> lk( setsLock );
const StringMap<ReplicaSetMonitorPtr>::const_iterator setIt = sets.find(name);
if (setIt != sets.end()) {
if (!clearSeedCache) {
@@ -474,7 +473,7 @@ namespace {
replicaSetMonitorWatcher.cancel();
replicaSetMonitorWatcher.stop();
replicaSetMonitorWatcher.wait();
- scoped_lock lock(setsLock);
+ boost::lock_guard<boost::mutex> lock(setsLock);
sets = StringMap<ReplicaSetMonitorPtr>();
seedServers = StringMap<set<HostAndPort> >();
}
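Note the split in the hunks above: plain critical sections become boost::lock_guard, but every site that waits on a condition variable becomes boost::unique_lock instead, because boost::condition_variable must be able to release and reacquire the lock (the old code reached the underlying lock via sl.boost()). A sketch of the wait loop, with hypothetical names:

    #include <boost/thread/condition_variable.hpp>
    #include <boost/thread/mutex.hpp>
    #include <boost/date_time/posix_time/posix_time.hpp>

    boost::mutex monitorMutex;                 // hypothetical
    boost::condition_variable stopRequestedCV; // hypothetical
    bool stopRequested = false;

    void sleepUnlessStopped() {
        boost::unique_lock<boost::mutex> lk(monitorMutex);
        while (!stopRequested) {
            // timed_wait unlocks lk while sleeping and relocks before returning;
            // false means the 10-second timeout expired
            if (!stopRequestedCV.timed_wait(lk, boost::posix_time::seconds(10)))
                return;
        }
    }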
diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp
index b6ce00e0717..600b54d2908 100644
--- a/src/mongo/client/scoped_db_conn_test.cpp
+++ b/src/mongo/client/scoped_db_conn_test.cpp
@@ -62,7 +62,7 @@ namespace {
const string TARGET_HOST = "localhost:27017";
const int TARGET_PORT = 27017;
- mongo::mutex shutDownMutex("shutDownMutex");
+ mongo::mutex shutDownMutex;
bool shuttingDown = false;
}
@@ -73,7 +73,7 @@ namespace mongo {
// Symbols defined to build the binary correctly.
bool inShutdown() {
- scoped_lock sl(shutDownMutex);
+ boost::lock_guard<boost::mutex> sl(shutDownMutex);
return shuttingDown;
}
@@ -81,7 +81,7 @@ namespace mongo {
void dbexit(ExitCode rc, const char *why){
{
- scoped_lock sl(shutDownMutex);
+ boost::lock_guard<boost::mutex> sl(shutDownMutex);
shuttingDown = true;
}
@@ -157,7 +157,7 @@ namespace mongo_test {
options.port = _port;
{
- mongo::mutex::scoped_lock sl(shutDownMutex);
+ boost::lock_guard<boost::mutex> sl(shutDownMutex);
shuttingDown = false;
}
@@ -174,7 +174,7 @@ namespace mongo_test {
}
{
- mongo::mutex::scoped_lock sl(shutDownMutex);
+ boost::lock_guard<boost::mutex> sl(shutDownMutex);
shuttingDown = true;
}
diff --git a/src/mongo/client/syncclusterconnection.cpp b/src/mongo/client/syncclusterconnection.cpp
index 762a2d69ea0..29f184f3689 100644
--- a/src/mongo/client/syncclusterconnection.cpp
+++ b/src/mongo/client/syncclusterconnection.cpp
@@ -52,7 +52,9 @@ namespace mongo {
using std::stringstream;
using std::vector;
- SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L, double socketTimeout)
+ : _socketTimeout(socketTimeout) {
+
{
stringstream s;
int n=0;
@@ -66,7 +68,9 @@ namespace mongo {
_connect( i->toString() );
}
- SyncClusterConnection::SyncClusterConnection( string commaSeparated, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ SyncClusterConnection::SyncClusterConnection( string commaSeparated, double socketTimeout) :
+ _socketTimeout( socketTimeout ) {
+
_address = commaSeparated;
string::size_type idx;
while ( ( idx = commaSeparated.find( ',' ) ) != string::npos ) {
@@ -78,7 +82,12 @@ namespace mongo {
uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
}
- SyncClusterConnection::SyncClusterConnection( const std::string& a , const std::string& b , const std::string& c, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ SyncClusterConnection::SyncClusterConnection(
+ const std::string& a,
+ const std::string& b,
+ const std::string& c,
+ double socketTimeout) : _socketTimeout( socketTimeout ) {
+
_address = a + "," + b + "," + c;
// connect to all even if not working
_connect( a );
@@ -86,7 +95,8 @@ namespace mongo {
_connect( c );
}
- SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout)
+ : _socketTimeout(socketTimeout) {
verify(0);
}
@@ -530,7 +540,7 @@ namespace mongo {
int SyncClusterConnection::_lockType( const string& name ) {
{
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
map<string,int>::iterator i = _lockTypes.find( name );
if ( i != _lockTypes.end() )
return i->second;
@@ -541,7 +551,7 @@ namespace mongo {
int lockType = info["lockType"].numberInt();
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_lockTypes[name] = lockType;
return lockType;
}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 9d42024210d..bf0f43dcc9b 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -62,9 +62,7 @@ namespace mongo {
// ----
- DBHashCmd::DBHashCmd()
- : Command( "dbHash", false, "dbhash" ),
- _cachedHashedMutex( "_cachedHashedMutex" ){
+ DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {
}
void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
@@ -75,11 +73,14 @@ namespace mongo {
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- string DBHashCmd::hashCollection( OperationContext* opCtx, Database* db, const string& fullCollectionName, bool* fromCache ) {
- scoped_ptr<scoped_lock> cachedHashedLock;
+ std::string DBHashCmd::hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache) {
+ boost::unique_lock<boost::mutex> cachedHashedLock(_cachedHashedMutex, boost::defer_lock);
if ( isCachable( fullCollectionName ) ) {
- cachedHashedLock.reset( new scoped_lock( _cachedHashedMutex ) );
+ cachedHashedLock.lock();
string hash = _cachedHashed[fullCollectionName];
if ( hash.size() > 0 ) {
*fromCache = true;
@@ -133,7 +134,7 @@ namespace mongo {
md5_finish(&st, d);
string hash = digestToString( d );
- if ( cachedHashedLock.get() ) {
+ if (cachedHashedLock.owns_lock()) {
_cachedHashed[fullCollectionName] = hash;
}
@@ -225,7 +226,7 @@ namespace mongo {
}
void commit() {
- scoped_lock lk( _dCmd->_cachedHashedMutex );
+ boost::lock_guard<boost::mutex> lk( _dCmd->_cachedHashedMutex );
_dCmd->_cachedHashed.erase(_ns);
}
void rollback() { }
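The dbhash change above is the one place the commit needs a conditionally held lock: the old heap-allocated scoped_ptr<scoped_lock> becomes a stack unique_lock constructed with boost::defer_lock, locked on demand, and later tested with owns_lock() where the old code tested get(). A minimal sketch, names hypothetical:

    #include <boost/thread/mutex.hpp>
    #include <boost/thread/locks.hpp>

    boost::mutex cacheMutex;   // hypothetical cache lock

    void hashMaybeCached(bool cachable) {
        // constructed unlocked; no heap allocation needed
        boost::unique_lock<boost::mutex> cacheLock(cacheMutex, boost::defer_lock);
        if (cachable)
            cacheLock.lock();          // take the lock only on the cachable path
        // ... compute the hash ...
        if (cacheLock.owns_lock()) {
            // still holding cacheMutex: safe to update the cache here
        }
    }   // unlocks automatically, but only if the lock was taken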
diff --git a/src/mongo/db/global_optime.cpp b/src/mongo/db/global_optime.cpp
index e47c5f0f40d..664838a179c 100644
--- a/src/mongo/db/global_optime.cpp
+++ b/src/mongo/db/global_optime.cpp
@@ -33,7 +33,7 @@
#include "mongo/util/log.h"
namespace {
- mongo::mutex globalOptimeMutex("globalOptime");
+ mongo::mutex globalOptimeMutex;
mongo::OpTime globalOpTime(0, 0);
bool skewed(const mongo::OpTime& val) {
@@ -49,17 +49,17 @@ namespace {
namespace mongo {
void setGlobalOptime(const OpTime& newTime) {
- mutex::scoped_lock lk(globalOptimeMutex);
+ boost::lock_guard<boost::mutex> lk(globalOptimeMutex);
globalOpTime = newTime;
}
OpTime getLastSetOptime() {
- mutex::scoped_lock lk(globalOptimeMutex);
+ boost::lock_guard<boost::mutex> lk(globalOptimeMutex);
return globalOpTime;
}
OpTime getNextGlobalOptime() {
- mutex::scoped_lock lk(globalOptimeMutex);
+ boost::lock_guard<boost::mutex> lk(globalOptimeMutex);
const unsigned now = (unsigned) time(0);
const unsigned globalSecs = globalOpTime.getSecs();
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index aa1cf74a859..cdb1ee82116 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -1186,8 +1186,7 @@ namespace mongo {
}
// ----- BEGIN Diaglog -----
- DiagLog::DiagLog() : f(0) , level(0), mutex("DiagLog") {
- }
+ DiagLog::DiagLog() : f(0), level(0) {}
void DiagLog::openFile() {
verify( f == 0 );
@@ -1206,7 +1205,7 @@ namespace mongo {
}
int DiagLog::setLevel( int newLevel ) {
- scoped_lock lk(mutex);
+ boost::lock_guard<boost::mutex> lk(mutex);
int old = level;
log() << "diagLogging level=" << newLevel << endl;
if( f == 0 ) {
@@ -1219,14 +1218,14 @@ namespace mongo {
void DiagLog::flush() {
if ( level ) {
log() << "flushing diag log" << endl;
- scoped_lock lk(mutex);
+ boost::lock_guard<boost::mutex> lk(mutex);
f->flush();
}
}
void DiagLog::writeop(char *data,int len) {
if ( level & 1 ) {
- scoped_lock lk(mutex);
+ boost::lock_guard<boost::mutex> lk(mutex);
f->write(data,len);
}
}
@@ -1236,7 +1235,7 @@ namespace mongo {
bool log = (level & 4) == 0;
OCCASIONALLY log = true;
if ( log ) {
- scoped_lock lk(mutex);
+ boost::lock_guard<boost::mutex> lk(mutex);
verify( f );
f->write(data,len);
}
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 232cdc1e57d..8d14aef3a98 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -158,11 +158,8 @@ namespace mongo {
RangeDeleter::RangeDeleter(RangeDeleterEnv* env):
_env(env), // ownership xfer
- _stopMutex("stopRangeDeleter"),
_stopRequested(false),
- _queueMutex("RangeDeleter"),
- _deletesInProgress(0),
- _statsHistoryMutex("RangeDeleterStatsHistory") {
+ _deletesInProgress(0) {
}
RangeDeleter::~RangeDeleter() {
@@ -200,7 +197,7 @@ namespace mongo {
void RangeDeleter::stopWorkers() {
{
- scoped_lock sl(_stopMutex);
+ boost::lock_guard<boost::mutex> sl(_stopMutex);
_stopRequested = true;
}
@@ -208,9 +205,9 @@ namespace mongo {
_worker->join();
}
- scoped_lock sl(_queueMutex);
+ boost::unique_lock<boost::mutex> sl(_queueMutex);
while (_deletesInProgress > 0) {
- _nothingInProgressCV.wait(sl.boost());
+ _nothingInProgressCV.wait(sl);
}
}
@@ -230,7 +227,7 @@ namespace mongo {
toDelete->notifyDone = notifyDone;
{
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
if (_stopRequested) {
*errMsg = "deleter is already stopped.";
return false;
@@ -253,7 +250,7 @@ namespace mongo {
logCursorsWaiting(toDelete.get());
{
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
if (toDelete->cursorsToWait.empty()) {
toDelete->stats.queueEndTS = jsTime();
@@ -320,7 +317,7 @@ namespace {
NSMinMax deleteRange(ns, min, max);
{
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
if (!canEnqueue_inlock(ns, min, max, errMsg)) {
return false;
}
@@ -362,7 +359,7 @@ namespace {
if (stopRequested()) {
*errMsg = "deleter was stopped.";
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
_deleteSet.erase(&deleteRange);
_deletesInProgress--;
@@ -395,7 +392,7 @@ namespace {
}
{
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
_deleteSet.erase(&deleteRange);
_deletesInProgress--;
@@ -413,7 +410,7 @@ namespace {
stats->clear();
stats->reserve(kDeleteJobsHistory);
- scoped_lock sl(_statsHistoryMutex);
+ boost::lock_guard<boost::mutex> sl(_statsHistoryMutex);
for (std::deque<DeleteJobStats*>::const_iterator it = _statsHistory.begin();
it != _statsHistory.end(); ++it) {
stats->push_back(new DeleteJobStats(**it));
@@ -421,7 +418,7 @@ namespace {
}
BSONObj RangeDeleter::toBSON() const {
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
BSONObjBuilder builder;
@@ -453,10 +450,10 @@ namespace {
RangeDeleteEntry* nextTask = NULL;
{
- scoped_lock sl(_queueMutex);
+ boost::unique_lock<boost::mutex> sl(_queueMutex);
while (_taskQueue.empty()) {
_taskQueueNotEmptyCV.timed_wait(
- sl.boost(), duration::milliseconds(kNotEmptyTimeoutMillis));
+ sl, duration::milliseconds(kNotEmptyTimeoutMillis));
if (stopRequested()) {
log() << "stopping range deleter worker" << endl;
@@ -539,7 +536,7 @@ namespace {
}
{
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
NSMinMax setEntry(nextTask->options.range.ns,
nextTask->options.range.minKey,
@@ -574,27 +571,27 @@ namespace {
}
bool RangeDeleter::stopRequested() const {
- scoped_lock sl(_stopMutex);
+ boost::lock_guard<boost::mutex> sl(_stopMutex);
return _stopRequested;
}
size_t RangeDeleter::getTotalDeletes() const {
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
return _deleteSet.size();
}
size_t RangeDeleter::getPendingDeletes() const {
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
return _notReadyQueue.size() + _taskQueue.size();
}
size_t RangeDeleter::getDeletesInProgress() const {
- scoped_lock sl(_queueMutex);
+ boost::lock_guard<boost::mutex> sl(_queueMutex);
return _deletesInProgress;
}
void RangeDeleter::recordDelStats(DeleteJobStats* newStat) {
- scoped_lock sl(_statsHistoryMutex);
+ boost::lock_guard<boost::mutex> sl(_statsHistoryMutex);
if (_statsHistory.size() == kDeleteJobsHistory) {
delete _statsHistory.front();
_statsHistory.pop_front();
diff --git a/src/mongo/db/range_deleter_mock_env.cpp b/src/mongo/db/range_deleter_mock_env.cpp
index 0a56f1bcb7d..7bff0c9dcc8 100644
--- a/src/mongo/db/range_deleter_mock_env.cpp
+++ b/src/mongo/db/range_deleter_mock_env.cpp
@@ -52,59 +52,55 @@ namespace mongo {
}
RangeDeleterMockEnv::RangeDeleterMockEnv():
- _deleteListMutex("delList"),
- _cursorMapMutex("cursorMap"),
- _pauseDeleteMutex("pauseDelete"),
_pauseDelete(false),
_pausedCount(0),
- _envStatMutex("envStat"),
_getCursorsCallCount(0) {
setGlobalEnvironment(new GlobalEnvironmentNoop());
}
void RangeDeleterMockEnv::addCursorId(StringData ns, CursorId id) {
- scoped_lock sl(_cursorMapMutex);
+ boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
_cursorMap[ns.toString()].insert(id);
}
void RangeDeleterMockEnv::removeCursorId(StringData ns, CursorId id) {
- scoped_lock sl(_cursorMapMutex);
+ boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
_cursorMap[ns.toString()].erase(id);
}
void RangeDeleterMockEnv::pauseDeletes() {
- scoped_lock sl(_pauseDeleteMutex);
+ boost::lock_guard<boost::mutex> sl(_pauseDeleteMutex);
_pauseDelete = true;
}
void RangeDeleterMockEnv::resumeOneDelete() {
- scoped_lock sl(_pauseDeleteMutex);
+ boost::lock_guard<boost::mutex> sl(_pauseDeleteMutex);
_pauseDelete = false;
_pausedCV.notify_one();
}
void RangeDeleterMockEnv::waitForNthGetCursor(uint64_t nthCall) {
- scoped_lock sl(_envStatMutex);
+ boost::unique_lock<boost::mutex> sl(_envStatMutex);
while (_getCursorsCallCount < nthCall) {
- _cursorsCallCountUpdatedCV.wait(sl.boost());
+ _cursorsCallCountUpdatedCV.wait(sl);
}
}
void RangeDeleterMockEnv::waitForNthPausedDelete(uint64_t nthPause) {
- scoped_lock sl(_pauseDeleteMutex);
+ boost::unique_lock<boost::mutex> sl(_pauseDeleteMutex);
while(_pausedCount < nthPause) {
- _pausedDeleteChangeCV.wait(sl.boost());
+ _pausedDeleteChangeCV.wait(sl);
}
}
bool RangeDeleterMockEnv::deleteOccured() const {
- scoped_lock sl(_deleteListMutex);
+ boost::lock_guard<boost::mutex> sl(_deleteListMutex);
return !_deleteList.empty();
}
DeletedRange RangeDeleterMockEnv::getLastDelete() const {
- scoped_lock sl(_deleteListMutex);
+ boost::lock_guard<boost::mutex> sl(_deleteListMutex);
return _deleteList.back();
}
@@ -114,7 +110,7 @@ namespace mongo {
string* errMsg) {
{
- scoped_lock sl(_pauseDeleteMutex);
+ boost::unique_lock<boost::mutex> sl(_pauseDeleteMutex);
bool wasInitiallyPaused = _pauseDelete;
if (_pauseDelete) {
@@ -123,14 +119,14 @@ namespace mongo {
}
while (_pauseDelete) {
- _pausedCV.wait(sl.boost());
+ _pausedCV.wait(sl);
}
_pauseDelete = wasInitiallyPaused;
}
{
- scoped_lock sl(_deleteListMutex);
+ boost::lock_guard<boost::mutex> sl(_deleteListMutex);
DeletedRange entry;
entry.ns = taskDetails.options.range.ns;
@@ -147,13 +143,13 @@ namespace mongo {
void RangeDeleterMockEnv::getCursorIds(
OperationContext* txn, StringData ns, set<CursorId>* in) {
{
- scoped_lock sl(_cursorMapMutex);
+ boost::lock_guard<boost::mutex> sl(_cursorMapMutex);
const set<CursorId>& _cursors = _cursorMap[ns.toString()];
std::copy(_cursors.begin(), _cursors.end(), inserter(*in, in->begin()));
}
{
- scoped_lock sl(_envStatMutex);
+ boost::lock_guard<boost::mutex> sl(_envStatMutex);
_getCursorsCallCount++;
_cursorsCallCountUpdatedCV.notify_one();
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 76ba3f7ec20..7d4eace3154 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -91,7 +91,7 @@ namespace {
// Synchronizes the section where a new OpTime is generated and when it actually
// appears in the oplog.
- mongo::mutex newOpMutex("oplogNewOp");
+ mongo::mutex newOpMutex;
boost::condition newOptimeNotifier;
static std::string _oplogCollectionName;
@@ -118,7 +118,7 @@ namespace {
const char* ns,
ReplicationCoordinator* replCoord,
const char* opstr) {
- mutex::scoped_lock lk(newOpMutex);
+ boost::lock_guard<boost::mutex> lk(newOpMutex);
OpTime ts = getNextGlobalOptime();
newOptimeNotifier.notify_all();
@@ -665,17 +665,17 @@ namespace {
}
void waitUpToOneSecondForOptimeChange(const OpTime& referenceTime) {
- mutex::scoped_lock lk(newOpMutex);
+ boost::unique_lock<boost::mutex> lk(newOpMutex);
while (referenceTime == getLastSetOptime()) {
- if (!newOptimeNotifier.timed_wait(lk.boost(),
+ if (!newOptimeNotifier.timed_wait(lk,
boost::posix_time::seconds(1)))
return;
}
}
void setNewOptime(const OpTime& newTime) {
- mutex::scoped_lock lk(newOpMutex);
+ boost::lock_guard<boost::mutex> lk(newOpMutex);
setGlobalOptime(newTime);
newOptimeNotifier.notify_all();
}
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index b44ef9812f6..ccae800a0f5 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -73,14 +73,14 @@ namespace mongo {
}
Snapshots::Snapshots(int n)
- : _lock("Snapshots"), _n(n)
+ : _n(n)
, _snapshots(new SnapshotData[n])
, _loc(0)
, _stored(0)
{}
const SnapshotData* Snapshots::takeSnapshot() {
- scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
_loc = ( _loc + 1 ) % _n;
_snapshots[_loc].takeSnapshot();
if ( _stored < _n )
@@ -89,7 +89,7 @@ namespace mongo {
}
auto_ptr<SnapshotDelta> Snapshots::computeDelta( int numBack ) {
- scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
auto_ptr<SnapshotDelta> p;
if ( numBack < numDeltas() )
p.reset( new SnapshotDelta( getPrev(numBack+1) , getPrev(numBack) ) );
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index 4dd2a39e39f..369ed9a4075 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -159,9 +159,9 @@ namespace mongo {
namespace {
SecureRandom* mySecureRandom = NULL;
- mongo::mutex mySecureRandomMutex( "JHeader-SecureRandom" );
+ mongo::mutex mySecureRandomMutex;
int64_t getMySecureRandomNumber() {
- scoped_lock lk( mySecureRandomMutex );
+ boost::lock_guard<boost::mutex> lk( mySecureRandomMutex );
if ( ! mySecureRandom )
mySecureRandom = SecureRandom::create();
return mySecureRandom->nextInt64();
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index 6c2021b37b1..1a0d03d96a4 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -136,7 +136,7 @@ namespace mongo {
(although not assured) that it is journaled here once.
*/
static void prepBasicWrites(AlignedBuilder& bb, const std::vector<WriteIntent>& intents) {
- scoped_lock lk(privateViews._mutex());
+ boost::lock_guard<boost::mutex> lk(privateViews._mutex());
// Each time write intents switch to a different database we journal a JDbContext.
// Switches will be rare as we sort by memory location first and we batch commit.
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index 4ca797afc89..9297472057b 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -253,8 +253,7 @@ namespace mongo {
RecoveryJob::RecoveryJob()
- : _mx("recovery"),
- _recovering(false),
+ : _recovering(false),
_lastDataSyncedFromLastRun(0),
_lastSeqMentionedInConsoleLog(1) {
@@ -268,7 +267,7 @@ namespace mongo {
}
void RecoveryJob::close() {
- scoped_lock lk(_mx);
+ boost::lock_guard<boost::mutex> lk(_mx);
_close();
}
@@ -388,7 +387,7 @@ namespace mongo {
void RecoveryJob::processSection(const JSectHeader *h, const void *p, unsigned len, const JSectFooter *f) {
LockMongoFilesShared lkFiles; // for RecoveryJob::Last
- scoped_lock lk(_mx);
+ boost::lock_guard<boost::mutex> lk(_mx);
// Check the footer checksum before doing anything else.
if (_recovering) {
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 3a2d2b2db9a..197cb45e844 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -85,7 +85,7 @@ namespace mongo {
/** de-register view. threadsafe */
void PointerToDurableMappedFile::remove(void *view, size_t len) {
if( view ) {
- mutex::scoped_lock lk(_m);
+ boost::lock_guard<boost::mutex> lk(_m);
clearWritableBits_inlock(view, len);
_views.erase(view);
}
@@ -93,7 +93,7 @@ namespace mongo {
#ifdef _WIN32
void PointerToDurableMappedFile::clearWritableBits(void *privateView, size_t len) {
- mutex::scoped_lock lk(_m);
+ boost::lock_guard<boost::mutex> lk(_m);
clearWritableBits_inlock(privateView, len);
}
@@ -110,7 +110,7 @@ namespace mongo {
extern mutex mapViewMutex;
__declspec(noinline) void PointerToDurableMappedFile::makeChunkWritable(size_t chunkno) {
- mutex::scoped_lock lkPrivateViews(_m);
+ boost::lock_guard<boost::mutex> lkPrivateViews(_m);
if (writable.get(chunkno)) // double check lock
return;
@@ -120,7 +120,7 @@ namespace mongo {
size_t chunkStart = chunkno * MemoryMappedCOWBitset::ChunkSize;
size_t chunkNext = chunkStart + MemoryMappedCOWBitset::ChunkSize;
- scoped_lock lkMapView(mapViewMutex);
+ boost::lock_guard<boost::mutex> lkMapView(mapViewMutex);
map<void*, DurableMappedFile*>::iterator i = _views.upper_bound((void*)(chunkNext - 1));
while (1) {
@@ -182,7 +182,7 @@ namespace mongo {
}
#endif
- PointerToDurableMappedFile::PointerToDurableMappedFile() : _m("PointerToDurableMappedFile") {
+ PointerToDurableMappedFile::PointerToDurableMappedFile() {
#if defined(SIZE_MAX)
size_t max = SIZE_MAX;
#else
@@ -225,7 +225,7 @@ namespace mongo {
@return the DurableMappedFile to which this pointer belongs. null if not found.
*/
DurableMappedFile* PointerToDurableMappedFile::find(void *p, /*out*/ size_t& ofs) {
- mutex::scoped_lock lk(_m);
+ boost::lock_guard<boost::mutex> lk(_m);
return find_inlock(p, ofs);
}
@@ -267,7 +267,7 @@ namespace mongo {
LOG(3) << "mmf finishOpening " << (void*) _view_write << ' ' << filename() << " len:" << length();
if( _view_write ) {
if (storageGlobalParams.dur) {
- scoped_lock lk2(privateViews._mutex());
+ boost::lock_guard<boost::mutex> lk2(privateViews._mutex());
_view_private = createPrivateMap();
if( _view_private == 0 ) {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index b6dda97e3eb..cb481ec9835 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -657,7 +657,7 @@ namespace mongo {
}
void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
- scoped_lock lk(_writersMutex);
+ boost::lock_guard<boost::mutex> lk(_writersMutex);
const int n = _size.load();
invariant(n < DiskLoc::MaxFiles);
// Note ordering: _size update must come after updating the _files array
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index dadc17b0a1f..323386fdd56 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -208,7 +208,7 @@ namespace mongo {
*/
class FilesArray {
public:
- FilesArray() : _writersMutex("MmapV1ExtentManager"), _size(0) { }
+ FilesArray() : _size(0) { }
~FilesArray();
/**
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index a0e98367238..ad812a971d0 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -284,19 +284,16 @@ namespace DocumentSourceTests {
/** Set a value or await an expected value. */
class PendingValue {
public:
- PendingValue( int initialValue ) :
- _value( initialValue ),
- _mutex( "DocumentSourceTests::PendingValue::_mutex" ) {
- }
+ PendingValue( int initialValue ) : _value( initialValue ) {}
void set( int newValue ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_value = newValue;
_condition.notify_all();
}
void await( int expectedValue ) const {
- scoped_lock lk( _mutex );
+ boost::unique_lock<boost::mutex> lk( _mutex );
while( _value != expectedValue ) {
- _condition.wait( lk.boost() );
+ _condition.wait( lk );
}
}
private:
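PendingValue::await() above is const yet locks _mutex and waits on _condition; that only compiles because those members are declared mutable (a part of the class left unchanged, so it does not appear in the hunk). A sketch of the idiom with a hypothetical class:

    #include <boost/thread/condition_variable.hpp>
    #include <boost/thread/mutex.hpp>

    class PendingFlag {   // hypothetical, modeled on PendingValue
    public:
        PendingFlag() : _ready(false) {}
        void set() {
            boost::lock_guard<boost::mutex> lk(_mutex);
            _ready = true;
            _condition.notify_all();
        }
        void await() const {
            // const method: legal only because _mutex/_condition are mutable
            boost::unique_lock<boost::mutex> lk(_mutex);
            while (!_ready)
                _condition.wait(lk);
        }
    private:
        mutable boost::mutex _mutex;
        mutable boost::condition_variable _condition;
        bool _ready;
    };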
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index d8b1bb9c472..2b859fc8c2c 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -63,7 +63,7 @@ namespace mongo {
namespace dbtests {
- mutex globalCurrentTestNameMutex("globalCurrentTestNameMutex");
+ mutex globalCurrentTestNameMutex;
std::string globalCurrentTestName;
class TestWatchDog : public BackgroundJob {
@@ -75,7 +75,7 @@ namespace mongo {
std::string lastRunningTestName, currentTestName;
{
- scoped_lock lk( globalCurrentTestNameMutex );
+ boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
lastRunningTestName = globalCurrentTestName;
}
@@ -84,7 +84,7 @@ namespace mongo {
minutesRunning++;
{
- scoped_lock lk( globalCurrentTestNameMutex );
+ boost::lock_guard<boost::mutex> lk( globalCurrentTestNameMutex );
currentTestName = globalCurrentTestName;
}
@@ -138,6 +138,6 @@ namespace mongo {
} // namespace mongo
void mongo::unittest::onCurrentTestNameChange( const std::string &testName ) {
- scoped_lock lk( mongo::dbtests::globalCurrentTestNameMutex );
+ boost::lock_guard<boost::mutex> lk( mongo::dbtests::globalCurrentTestNameMutex );
mongo::dbtests::globalCurrentTestName = testName;
}
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp
index c50e177667f..92f1382c542 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.cpp
+++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp
@@ -46,10 +46,7 @@ namespace mongo {
return Status::OK();
}
- MockConnRegistry::MockConnRegistry():
- _mockConnStrHook(this),
- _registryMutex("mockConnRegistryMutex") {
- }
+ MockConnRegistry::MockConnRegistry() : _mockConnStrHook(this) {}
MockConnRegistry* MockConnRegistry::get() {
return _instance.get();
@@ -60,7 +57,7 @@ namespace mongo {
}
void MockConnRegistry::addServer(MockRemoteDBServer* server) {
- scoped_lock sl(_registryMutex);
+ boost::lock_guard<boost::mutex> sl(_registryMutex);
const std::string hostName(server->getServerAddress());
fassert(16533, _registry.count(hostName) == 0);
@@ -69,17 +66,17 @@ namespace mongo {
}
bool MockConnRegistry::removeServer(const std::string& hostName) {
- scoped_lock sl(_registryMutex);
+ boost::lock_guard<boost::mutex> sl(_registryMutex);
return _registry.erase(hostName) == 1;
}
void MockConnRegistry::clear() {
- scoped_lock sl(_registryMutex);
+ boost::lock_guard<boost::mutex> sl(_registryMutex);
_registry.clear();
}
MockDBClientConnection* MockConnRegistry::connect(const std::string& connStr) {
- scoped_lock sl(_registryMutex);
+ boost::lock_guard<boost::mutex> sl(_registryMutex);
fassert(16534, _registry.count(connStr) == 1);
return new MockDBClientConnection(_registry[connStr], true);
}
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 76ae98c56d3..38dfc814d8e 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -537,7 +537,6 @@ namespace PerfTests {
RWLock lk("testrw");
SimpleMutex m("simptst");
- mongo::mutex mtest("mtest");
boost::mutex mboost;
boost::timed_mutex mboost_timed;
std::mutex mstd;
@@ -554,15 +553,6 @@ namespace PerfTests {
c.notify_one();
}
};
- class mutexspeed : public B {
- public:
- string name() { return "mutex"; }
- virtual int howLongMillis() { return 500; }
- virtual bool showDurStats() { return false; }
- void timed() {
- mongo::mutex::scoped_lock lk(mtest);
- }
- };
class boostmutexspeed : public B {
public:
string name() { return "boost::mutex"; }
@@ -1474,7 +1464,6 @@ namespace PerfTests {
add< locker_contestedS >();
add< locker_uncontestedS >();
add< NotifyOne >();
- add< mutexspeed >();
add< simplemutexspeed >();
add< boostmutexspeed >();
add< boosttimed_mutexspeed >();
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 62d8465e940..8085be0534e 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -754,17 +754,17 @@ namespace ThreadedTests {
class Hotel {
public:
- Hotel( int nRooms ) : _frontDesk( "frontDesk" ), _nRooms( nRooms ), _checkedIn( 0 ), _maxRooms( 0 ) {}
+ Hotel( int nRooms ) : _nRooms( nRooms ), _checkedIn( 0 ), _maxRooms( 0 ) {}
void checkIn(){
- scoped_lock lk( _frontDesk );
+ boost::lock_guard<boost::mutex> lk( _frontDesk );
_checkedIn++;
verify( _checkedIn <= _nRooms );
if( _checkedIn > _maxRooms ) _maxRooms = _checkedIn;
}
void checkOut(){
- scoped_lock lk( _frontDesk );
+ boost::lock_guard<boost::mutex> lk( _frontDesk );
_checkedIn--;
verify( _checkedIn >= 0 );
}
@@ -824,7 +824,6 @@ namespace ThreadedTests {
// Slack is a test to see how long it takes for another thread to pick up
// and begin work after another relinquishes the lock. e.g. a spin lock
// would have very little slack.
- add< Slack<mongo::mutex , mongo::mutex::scoped_lock > >();
add< Slack<SimpleMutex,SimpleMutex::scoped_lock> >();
add< Slack<SimpleRWLock,SimpleRWLock::Exclusive> >();
add< CondSlack >();
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index 36ac1ef5023..0ea9e324d96 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -206,10 +206,10 @@ namespace {
RamLog* RamLog::get(const std::string& name) {
if (!_namedLock) {
// Guaranteed to happen before multi-threaded operation.
- _namedLock = new mongo::mutex("RamLog::_namedLock");
+ _namedLock = new mongo::mutex();
}
- scoped_lock lk( *_namedLock );
+ boost::lock_guard<boost::mutex> lk( *_namedLock );
if (!_named) {
// Guaranteed to happen before multi-threaded operation.
_named = new RM();
@@ -226,7 +226,7 @@ namespace {
RamLog* RamLog::getIfExists(const std::string& name) {
if (!_named)
return NULL;
- scoped_lock lk(*_namedLock);
+ boost::lock_guard<boost::mutex> lk(*_namedLock);
return mapFindWithDefault(*_named, name, static_cast<RamLog*>(NULL));
}
@@ -234,7 +234,7 @@ namespace {
if ( ! _named )
return;
- scoped_lock lk( *_namedLock );
+ boost::lock_guard<boost::mutex> lk( *_namedLock );
for ( RM::iterator i=_named->begin(); i!=_named->end(); ++i ) {
if ( i->second->n )
names.push_back( i->first );
@@ -251,7 +251,7 @@ namespace {
return Status(ErrorCodes::InternalError,
"Inconsistent intiailization of RamLogCatalog.");
}
- _namedLock = new mongo::mutex("RamLog::_namedLock");
+ _namedLock = new mongo::mutex();
_named = new RM();
}
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 0270fbe2ba6..6e3dfe60309 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -116,7 +116,6 @@ namespace {
_keyPattern( pattern.getKeyPattern() ),
_unique( unique ),
_chunkRanges(),
- _mutex("ChunkManager"),
_sequenceNumber(NextSequenceNumber.addAndFetch(1))
{
//
@@ -135,7 +134,6 @@ namespace {
BSONObj()),
_unique(collDoc[CollectionType::unique()].trueValue()),
_chunkRanges(),
- _mutex("ChunkManager"),
// The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's.
// Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
// the most up to date value.
@@ -663,7 +661,7 @@ namespace {
}
void ChunkManager::drop() const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk(_mutex);
configServer.logChange( "dropCollection.start" , _ns , BSONObj() );
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index c1357835bd2..69b17ea6656 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -65,17 +65,13 @@ namespace {
*/
class ActiveClientConnections {
public:
- ActiveClientConnections() : _mutex("ActiveClientConnections") {
-
- }
-
void add(const ClientConnections* cc) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
_clientConnections.erase(cc);
}
@@ -391,7 +387,7 @@ namespace {
BSONArrayBuilder arr(64 * 1024); // There may be quite a few threads
{
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
for (set<const ClientConnections*>::const_iterator i = _clientConnections.begin();
i != _clientConnections.end();
++i) {
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index d279fa06475..2cad43ca841 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -164,10 +164,7 @@ namespace mongo {
DBConfig::DBConfig(std::string name)
: _name(name),
_primary("config", "", 0 /* maxSize */, false /* draining */),
- _shardingEnabled(false),
- _lock("DBConfig"),
- _hitConfigServerLock("DBConfig::_hitConfigServerLock") {
-
+ _shardingEnabled(false) {
invariant(!_name.empty());
}
@@ -178,7 +175,7 @@ namespace mongo {
bool DBConfig::isSharded( const string& ns ) {
if ( ! _shardingEnabled )
return false;
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
return _isSharded( ns );
}
@@ -221,7 +218,7 @@ namespace mongo {
verify( _name != "config" );
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
_shardingEnabled = true;
if( save ) _save();
}
@@ -241,7 +238,7 @@ namespace mongo {
ChunkManagerPtr manager;
{
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfo& ci = _collections[ns];
uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
@@ -321,7 +318,7 @@ namespace mongo {
return false;
}
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfoMap::iterator i = _collections.find( ns );
@@ -352,7 +349,7 @@ namespace mongo {
primary.reset();
{
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfoMap::iterator i = _collections.find( ns );
@@ -403,7 +400,7 @@ namespace mongo {
ChunkManagerPtr oldManager;
{
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
bool earlyReload = ! _collections[ns].isSharded() && ( shouldReload || forceReload );
if ( earlyReload ) {
@@ -441,7 +438,7 @@ namespace mongo {
if ( ! newest.isEmpty() ) {
ChunkVersion v = ChunkVersion::fromBSON(newest, ChunkType::DEPRECATED_lastmod());
if ( v.equals( oldVersion ) ) {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfo& ci = _collections[ns];
uassert( 15885 , str::stream() << "not sharded after reloading from chunks : " << ns , ci.isSharded() );
return ci.getCM();
@@ -459,13 +456,13 @@ namespace mongo {
auto_ptr<ChunkManager> temp;
{
- scoped_lock lll ( _hitConfigServerLock );
+ boost::lock_guard<boost::mutex> lll ( _hitConfigServerLock );
if ( ! newest.isEmpty() && ! forceReload ) {
// if we have a target we're going for
// see if we've hit already
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfo& ci = _collections[ns];
if ( ci.isSharded() && ci.getCM() ) {
@@ -495,7 +492,7 @@ namespace mongo {
}
}
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
CollectionInfo& ci = _collections[ns];
uassert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() );
@@ -535,7 +532,7 @@ namespace mongo {
}
void DBConfig::setPrimary( const std::string& s ) {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
_primary.reset( s );
_save();
}
@@ -562,7 +559,7 @@ namespace mongo {
}
bool DBConfig::load() {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
return _load();
}
@@ -660,7 +657,7 @@ namespace mongo {
bool successful = false;
{
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
successful = _reload();
}
@@ -798,7 +795,7 @@ namespace mongo {
}
void DBConfig::getAllShards(set<Shard>& shards) const {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
shards.insert(getPrimary());
for (CollectionInfoMap::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) {
if (it->second.isSharded()) {
@@ -809,7 +806,7 @@ namespace mongo {
void DBConfig::getAllShardedCollections( set<string>& namespaces ) const {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
for( CollectionInfoMap::const_iterator i = _collections.begin(); i != _collections.end(); i++ ) {
log() << "Coll : " << i->first << " sharded? " << i->second.isSharded() << endl;
diff --git a/src/mongo/s/config_server_checker_service.cpp b/src/mongo/s/config_server_checker_service.cpp
index 6f80a7004a1..92ac1062faf 100644
--- a/src/mongo/s/config_server_checker_service.cpp
+++ b/src/mongo/s/config_server_checker_service.cpp
@@ -45,7 +45,7 @@ namespace mongo {
boost::scoped_ptr<boost::thread> _checkerThread;
// Protects _isConsistentFromLastCheck.
- mutex _isConsistentMutex( "ConfigServerConsistent" );
+ mutex _isConsistentMutex;
bool _isConsistentFromLastCheck = true;
void checkConfigConsistency() {
@@ -53,7 +53,7 @@ namespace mongo {
bool isConsistent = configServer.ok( true );
{
- scoped_lock sl( _isConsistentMutex );
+ boost::lock_guard<boost::mutex> sl( _isConsistentMutex );
_isConsistentFromLastCheck = isConsistent;
}
@@ -63,7 +63,7 @@ namespace mongo {
}
bool isConfigServerConsistent() {
- scoped_lock sl( _isConsistentMutex );
+ boost::lock_guard<boost::mutex> sl( _isConsistentMutex );
return _isConsistentFromLastCheck;
}
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 3fd45ecaa0e..75875d7177d 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -226,9 +226,8 @@ namespace mongo {
}
CursorCache::CursorCache()
- :_mutex( "CursorCache" ),
- _random( getCCRandomSeed() ),
- _shardedTotal(0) {
+ : _random( getCCRandomSeed() ),
+ _shardedTotal(0) {
}
CursorCache::~CursorCache() {
@@ -247,7 +246,7 @@ namespace mongo {
ShardedClientCursorPtr CursorCache::get( long long id ) const {
LOG(_myLogLevel) << "CursorCache::get id: " << id << endl;
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
MapSharded::const_iterator i = _cursors.find( id );
if ( i == _cursors.end() ) {
return ShardedClientCursorPtr();
@@ -258,7 +257,7 @@ namespace mongo {
int CursorCache::getMaxTimeMS( long long id ) const {
verify( id );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
MapShardedInt::const_iterator i = _cursorsMaxTimeMS.find( id );
return ( i != _cursorsMaxTimeMS.end() ) ? i->second : 0;
}
@@ -272,7 +271,7 @@ namespace mongo {
verify( maxTimeMS == kMaxTimeCursorTimeLimitExpired
|| maxTimeMS == kMaxTimeCursorNoTimeLimit
|| maxTimeMS > 0 );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_cursorsMaxTimeMS[cursor->getId()] = maxTimeMS;
_cursors[cursor->getId()] = cursor;
_shardedTotal++;
@@ -283,20 +282,20 @@ namespace mongo {
verify( maxTimeMS == kMaxTimeCursorTimeLimitExpired
|| maxTimeMS == kMaxTimeCursorNoTimeLimit
|| maxTimeMS > 0 );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_cursorsMaxTimeMS[id] = maxTimeMS;
}
void CursorCache::remove( long long id ) {
verify( id );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_cursorsMaxTimeMS.erase( id );
_cursors.erase( id );
}
void CursorCache::removeRef( long long id ) {
verify( id );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_refs.erase( id );
_refsNS.erase( id );
cursorStatsSingleTarget.decrement();
@@ -305,7 +304,7 @@ namespace mongo {
void CursorCache::storeRef(const std::string& server, long long id, const std::string& ns) {
LOG(_myLogLevel) << "CursorCache::storeRef server: " << server << " id: " << id << endl;
verify( id );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_refs[id] = server;
_refsNS[id] = ns;
cursorStatsSingleTarget.increment();
@@ -313,7 +312,7 @@ namespace mongo {
string CursorCache::getRef( long long id ) const {
verify( id );
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
MapNormal::const_iterator i = _refs.find( id );
LOG(_myLogLevel) << "CursorCache::getRef id: " << id << " out: " << ( i == _refs.end() ? " NONE " : i->second ) << endl;
@@ -325,7 +324,7 @@ namespace mongo {
std::string CursorCache::getRefNS(long long id) const {
verify(id);
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
MapNormal::const_iterator i = _refsNS.find(id);
LOG(_myLogLevel) << "CursorCache::getRefNs id: " << id
@@ -339,7 +338,7 @@ namespace mongo {
long long CursorCache::genId() {
while ( true ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
long long x = Listener::getElapsedTimeMillis() << 32;
x |= _random.nextInt32();
@@ -391,7 +390,7 @@ namespace mongo {
string server;
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
MapSharded::iterator i = _cursors.find( id );
if ( i != _cursors.end() ) {
@@ -442,7 +441,7 @@ namespace mongo {
}
void CursorCache::appendInfo( BSONObjBuilder& result ) const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
result.append( "sharded", static_cast<int>(cursorStatsMultiTarget.get()));
result.appendNumber( "shardedEver" , _shardedTotal );
result.append( "refs", static_cast<int>(cursorStatsSingleTarget.get()));
@@ -451,7 +450,7 @@ namespace mongo {
void CursorCache::doTimeouts() {
long long now = Listener::getElapsedTimeMillis();
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ) {
// Note: cursors with no timeout will always have an idleTime of 0
long long idleFor = i->second->idleTime( now );
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 64486cc827d..72f1740ca8c 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -253,11 +253,9 @@ namespace mongo {
class MigrateFromStatus {
public:
MigrateFromStatus():
- _mutex("MigrateFromStatus"),
_inCriticalSection(false),
_memoryUsed(0),
- _active(false),
- _cloneLocsMutex("MigrateFromTrackerMutex") {
+ _active(false) {
}
/**
@@ -276,7 +274,7 @@ namespace mongo {
// Get global shared to synchronize with logOp. Also see comments in the class
// members declaration for more details.
Lock::GlobalRead globalShared(txn->lockState());
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
if (_active) {
return false;
@@ -293,7 +291,7 @@ namespace mongo {
_active = true;
- scoped_lock tLock(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> tLock(_cloneLocsMutex);
verify(_cloneLocs.size() == 0);
return true;
@@ -306,7 +304,7 @@ namespace mongo {
// Get global shared to synchronize with logOp. Also see comments in the class
// members declaration for more details.
Lock::GlobalRead globalShared(txn->lockState());
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_active = false;
_deleteNotifyExec.reset( NULL );
@@ -317,7 +315,7 @@ namespace mongo {
_reload.clear();
_memoryUsed = 0;
- scoped_lock cloneLock(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> cloneLock(_cloneLocsMutex);
_cloneLocs.clear();
}
@@ -443,7 +441,7 @@ namespace mongo {
{
AutoGetCollectionForRead ctx(txn, getNS());
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
if (!_active) {
errmsg = "no active migration!";
return false;
@@ -502,7 +500,7 @@ namespace mongo {
// It's alright not to lock _mutex all the way through based on the assumption
// that this is only called by the main thread that drives the migration and
// only it can start and stop the current migration.
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
invariant( _deleteNotifyExec.get() == NULL );
WorkingSet* ws = new WorkingSet();
@@ -552,7 +550,7 @@ namespace mongo {
RecordId dl;
while (PlanExecutor::ADVANCED == exec->getNext(NULL, &dl)) {
if ( ! isLargeChunk ) {
- scoped_lock lk(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> lk(_cloneLocsMutex);
_cloneLocs.insert( dl );
}
@@ -565,7 +563,7 @@ namespace mongo {
exec.reset();
if ( isLargeChunk ) {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
warning() << "cannot move chunk: the maximum number of documents for a chunk is "
<< maxRecsWhenFull << " , the maximum chunk size is " << maxChunkSize
<< " , average document size is " << avgRecSize
@@ -593,7 +591,7 @@ namespace mongo {
{
AutoGetCollectionForRead ctx(txn, getNS());
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
if (!_active) {
errmsg = "not active";
return false;
@@ -616,7 +614,7 @@ namespace mongo {
while (!isBufferFilled) {
AutoGetCollectionForRead ctx(txn, getNS());
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
if (!_active) {
errmsg = "not active";
return false;
@@ -631,7 +629,7 @@ namespace mongo {
return false;
}
- scoped_lock lk(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> lk(_cloneLocsMutex);
set<RecordId>::iterator cloneLocsIter = _cloneLocs.begin();
for ( ; cloneLocsIter != _cloneLocs.end(); ++cloneLocsIter) {
if (tracker.intervalHasElapsed()) // should I yield?
@@ -674,33 +672,33 @@ namespace mongo {
// that check only works for non-mmapv1 engines, and this is needed
// for mmapv1.
- scoped_lock lk(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> lk(_cloneLocsMutex);
_cloneLocs.erase( dl );
}
std::size_t cloneLocsRemaining() {
- scoped_lock lk(_cloneLocsMutex);
+ boost::lock_guard<boost::mutex> lk(_cloneLocsMutex);
return _cloneLocs.size();
}
long long mbUsed() const {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
return _memoryUsed / ( 1024 * 1024 );
}
bool getInCriticalSection() const {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
return _inCriticalSection;
}
void setInCriticalSection( bool b ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_inCriticalSection = b;
_inCriticalSectionCV.notify_all();
}
std::string getNS() const {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
return _ns;
}
@@ -712,9 +710,9 @@ namespace mongo {
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
- scoped_lock lk(_mutex);
+ boost::unique_lock<boost::mutex> lk(_mutex);
while (_inCriticalSection) {
- if (!_inCriticalSectionCV.timed_wait(lk.boost(), xt))
+ if (!_inCriticalSectionCV.timed_wait(lk, xt))
return false;
}
@@ -724,8 +722,8 @@ namespace mongo {
bool isActive() const { return _getActive(); }
private:
- bool _getActive() const { scoped_lock lk(_mutex); return _active; }
- void _setActive( bool b ) { scoped_lock lk(_mutex); _active = b; }
+ bool _getActive() const { boost::lock_guard<boost::mutex> lk(_mutex); return _active; }
+ void _setActive( bool b ) { boost::lock_guard<boost::mutex> lk(_mutex); _active = b; }
/**
* Used to commit work for LogOpForSharding. Used to keep track of changes in documents
@@ -748,7 +746,7 @@ namespace mongo {
virtual void commit() {
switch (_op) {
case 'd': {
- scoped_lock sl(_migrateFromStatus->_mutex);
+ boost::lock_guard<boost::mutex> sl(_migrateFromStatus->_mutex);
_migrateFromStatus->_deleted.push_back(_idObj);
_migrateFromStatus->_memoryUsed += _idObj.firstElement().size() + 5;
break;
@@ -757,7 +755,7 @@ namespace mongo {
case 'i':
case 'u':
{
- scoped_lock sl(_migrateFromStatus->_mutex);
+ boost::lock_guard<boost::mutex> sl(_migrateFromStatus->_mutex);
_migrateFromStatus->_reload.push_back(_idObj);
_migrateFromStatus->_memoryUsed += _idObj.firstElement().size() + 5;
break;
@@ -1818,7 +1816,6 @@ namespace mongo {
};
MigrateStatus():
- _mutex("MigrateStatus"),
_active(false),
_numCloned(0),
_clonedBytes(0),
@@ -1828,12 +1825,12 @@ namespace mongo {
}
void setState(State newState) {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
_state = newState;
}
State getState() const {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
return _state;
}
@@ -1845,7 +1842,7 @@ namespace mongo {
const BSONObj& min,
const BSONObj& max,
const BSONObj& shardKeyPattern) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
if (_active) {
return Status(ErrorCodes::ConflictingOperationInProgress,
@@ -1888,7 +1885,7 @@ namespace mongo {
}
catch ( std::exception& e ) {
{
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
_state = FAIL;
_errmsg = e.what();
}
@@ -1897,7 +1894,7 @@ namespace mongo {
}
catch ( ... ) {
{
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
_state = FAIL;
_errmsg = "UNKNOWN ERROR";
}
@@ -2182,7 +2179,7 @@ namespace mongo {
thisTime++;
{
- scoped_lock statsLock(_mutex);
+ boost::lock_guard<boost::mutex> statsLock(_mutex);
_numCloned++;
_clonedBytes += docToClone.objsize();
}
@@ -2354,7 +2351,7 @@ namespace mongo {
}
void status(BSONObjBuilder& b) {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
b.appendBool("active", _active);
@@ -2569,7 +2566,7 @@ namespace mongo {
}
bool startCommit() {
- scoped_lock lock(_mutex);
+ boost::unique_lock<boost::mutex> lock(_mutex);
if (_state != STEADY) {
return false;
@@ -2581,7 +2578,7 @@ namespace mongo {
_state = COMMIT_START;
while (_active) {
- if ( ! isActiveCV.timed_wait( lock.boost(), xt ) ){
+ if ( ! isActiveCV.timed_wait( lock, xt ) ){
// TIMEOUT
_state = FAIL;
log() << "startCommit never finished!" << migrateLog;
@@ -2598,14 +2595,14 @@ namespace mongo {
}
void abort() {
- scoped_lock sl(_mutex);
+ boost::lock_guard<boost::mutex> sl(_mutex);
_state = ABORT;
_errmsg = "aborted";
}
- bool getActive() const { scoped_lock lk(_mutex); return _active; }
+ bool getActive() const { boost::lock_guard<boost::mutex> lk(_mutex); return _active; }
void setActive( bool b ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_active = b;
isActiveCV.notify_all();
}
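Where the hunks above keep a wait, as in _inCriticalSectionCV.timed_wait(lk, xt) and isActiveCV.timed_wait(lock, xt), the lock must be a boost::unique_lock: a condition variable has to unlock the mutex while sleeping and relock it on wakeup, which lock_guard cannot do. A sketch of the same handshake with std:: equivalents (wait_for with a predicate stands in for timed_wait against a boost::xtime deadline; names are illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class CriticalSectionFlag {
    public:
        void set(bool b) {
            std::lock_guard<std::mutex> lk(_mutex);
            _inCriticalSection = b;
            _cv.notify_all();
        }
        // Returns false if the section is still held when the timeout expires.
        bool waitUntilClear(std::chrono::seconds maxWait) {
            std::unique_lock<std::mutex> lk(_mutex);
            return _cv.wait_for(lk, maxWait, [this] { return !_inCriticalSection; });
        }
    private:
        std::mutex _mutex;
        std::condition_variable _cv;
        bool _inCriticalSection = false;
    };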
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 15b2ef6ec9f..c89d7532755 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -76,12 +76,12 @@ namespace mongo {
// -----ShardingState START ----
ShardingState::ShardingState()
- : _enabled(false) , _mutex( "ShardingState" ),
+ : _enabled(false),
_configServerTickets( 3 /* max number of concurrent config server refresh threads */ ) {
}
void ShardingState::enable( const string& server ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_enabled = true;
verify( server.size() );
@@ -108,7 +108,7 @@ namespace mongo {
}
bool ShardingState::setShardNameAndHost( const string& name, const string& host ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
if ( _shardName.size() == 0 ) {
// TODO SERVER-2299 remotely verify the name is sound w.r.t IPs
_shardName = name;
@@ -156,7 +156,7 @@ namespace mongo {
}
void ShardingState::resetShardingState() {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_enabled = false;
_configServer.clear();
@@ -166,14 +166,14 @@ namespace mongo {
// TODO we shouldn't need three ways for checking the version. Fix this.
bool ShardingState::hasVersion( const string& ns ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
return it != _collMetadata.end();
}
bool ShardingState::hasVersion( const string& ns , ChunkVersion& version ) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
if ( it == _collMetadata.end() )
@@ -185,7 +185,7 @@ namespace mongo {
}
const ChunkVersion ShardingState::getVersion( const string& ns ) const {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it != _collMetadata.end() ) {
@@ -204,7 +204,7 @@ namespace mongo {
ChunkVersion version) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() ) ;
@@ -234,7 +234,7 @@ namespace mongo {
CollectionMetadataPtr prevMetadata) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
log() << "ShardingState::undoDonateChunk acquired _mutex" << endl;
@@ -251,7 +251,7 @@ namespace mongo {
string* errMsg ) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -296,7 +296,7 @@ namespace mongo {
string* errMsg ) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -341,7 +341,7 @@ namespace mongo {
ChunkVersion version ) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() ) ;
@@ -365,7 +365,7 @@ namespace mongo {
ChunkVersion mergedVersion ) {
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
verify( it != _collMetadata.end() );
@@ -383,7 +383,7 @@ namespace mongo {
}
void ShardingState::resetMetadata( const string& ns ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
warning() << "resetting metadata for " << ns << ", this should only be used in testing"
<< endl;
@@ -417,7 +417,7 @@ namespace mongo {
CollectionMetadataPtr storedMetadata;
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::iterator it = _collMetadata.find( ns );
if ( it != _collMetadata.end() ) storedMetadata = it->second;
}
@@ -483,7 +483,7 @@ namespace mongo {
string configServer;
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
// We can't reload if sharding is not enabled - i.e. without a config server location
if (!_enabled) {
@@ -608,7 +608,7 @@ namespace mongo {
// Get the metadata now that the load has completed
//
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
// Don't reload if our config server has changed or sharding is no longer enabled
if (!_enabled) {
@@ -785,7 +785,7 @@ namespace mongo {
void ShardingState::appendInfo(BSONObjBuilder& builder) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
builder.appendBool("enabled", _enabled);
if (!_enabled)
@@ -814,7 +814,7 @@ namespace mongo {
}
CollectionMetadataPtr ShardingState::getCollectionMetadata( const string& ns ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
CollectionMetadataMap::const_iterator it = _collMetadata.find( ns );
if ( it == _collMetadata.end() ) {
@@ -866,10 +866,10 @@ namespace mongo {
}
void ShardedConnectionInfo::addHook() {
- static mongo::mutex lock("ShardedConnectionInfo::addHook mutex");
+ static mongo::mutex lock;
static bool done = false;
- scoped_lock lk(lock);
+ boost::lock_guard<boost::mutex> lk(lock);
if (!done) {
log() << "first cluster operation detected, adding sharding hook to enable versioning "
"and authentication to remote servers" << endl;
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index d9edb1159b9..34368b0c143 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -60,7 +60,7 @@ namespace mongo {
void gotShardName( const std::string& name );
bool setShardName( const std::string& name ); // Same as above, does not throw
- std::string getShardName() { scoped_lock lk(_mutex); return _shardName; }
+ std::string getShardName() { boost::lock_guard<boost::mutex> lk(_mutex); return _shardName; }
// Helpers for SetShardVersion which report the host name sent to this shard when the shard
// name does not match. Do not use in other places.
diff --git a/src/mongo/s/distlock.cpp b/src/mongo/s/distlock.cpp
index 651811f3ef6..7ae4749c092 100644
--- a/src/mongo/s/distlock.cpp
+++ b/src/mongo/s/distlock.cpp
@@ -108,11 +108,6 @@ namespace mongo {
class DistributedLockPinger {
public:
-
- DistributedLockPinger()
- : _mutex( "DistributedLockPinger" ) {
- }
-
void _distLockPingThread( ConnectionString addr,
const std::string& process,
unsigned long long sleepTime ) {
@@ -213,7 +208,7 @@ namespace mongo {
// Remove old locks, if possible
// Make sure no one else is adding to this list at the same time
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
int numOldLocks = _oldLockOIDs.size();
if( numOldLocks > 0 ) {
@@ -294,7 +289,7 @@ namespace mongo {
if (!lockPingerEnabled) return "";
// Make sure we don't start multiple threads for a process id
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
const ConnectionString& conn = lock.getRemoteConnection();
const string& processId = lock.getProcessId();
@@ -320,18 +315,18 @@ namespace mongo {
void addUnlockOID( const OID& oid ) {
// Modifying the lock from some other thread
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_oldLockOIDs.push_back( oid );
}
bool willUnlockOID( const OID& oid ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
return find( _oldLockOIDs.begin(), _oldLockOIDs.end(), oid ) != _oldLockOIDs.end();
}
void kill( const ConnectionString& conn, const string& processId ) {
// Make sure we're in a consistent state before other threads can see us
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
string pingId = pingThreadId( conn, processId );
@@ -341,13 +336,13 @@ namespace mongo {
}
bool shouldKill( const ConnectionString& conn, const string& processId ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
return _kill.count( pingThreadId( conn, processId ) ) > 0;
}
void finishKill( const ConnectionString& conn, const string& processId ) {
// Make sure we're in a consistent state before other threads can see us
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
string pingId = pingThreadId( conn, processId );
@@ -374,7 +369,7 @@ namespace mongo {
_processId( asProcess ? getDistLockId() : getDistLockProcess() ),
_lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ),
_maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ),
- _lockPing( _maxClockSkew ), _mutex( "DistributedLock" )
+ _lockPing( _maxClockSkew )
{
LOG( logLvl ) << "created new distributed lock for " << name << " on " << conn
<< " ( lock timeout : " << _lockTimeout
@@ -384,12 +379,12 @@ namespace mongo {
}
DistributedLock::PingData DistributedLock::LastPings::getLastPing( const ConnectionString& conn, const string& lockName ){
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
return _lastPings[ std::pair< string, string >( conn.toString(), lockName ) ];
}
void DistributedLock::LastPings::setLastPing( const ConnectionString& conn, const string& lockName, const PingData& pd ){
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
_lastPings[ std::pair< string, string >( conn.toString(), lockName ) ] = pd;
}
@@ -613,7 +608,7 @@ namespace mongo {
// TODO: Start pinging only when we actually get the lock?
// If we don't have a thread pinger, make sure we shouldn't have one
if( _threadId == "" ){
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_threadId = distLockPinger.got( *this, _lockPing );
}
diff --git a/src/mongo/s/distlock.h b/src/mongo/s/distlock.h
index 4ce30222068..1b52039129c 100644
--- a/src/mongo/s/distlock.h
+++ b/src/mongo/s/distlock.h
@@ -127,9 +127,6 @@ namespace mongo {
class LastPings {
public:
- LastPings() : _mutex( "DistributedLock::LastPings" ) {}
- ~LastPings(){}
-
PingData getLastPing( const ConnectionString& conn, const std::string& lockName );
void setLastPing( const ConnectionString& conn, const std::string& lockName, const PingData& pd );
@@ -137,7 +134,7 @@ namespace mongo {
std::map< std::pair<std::string, std::string>, PingData > _lastPings;
};
- static LastPings lastPings;
+ static LastPings lastPings;
/**
* The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 60874a05c57..7e10b231c60 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -67,15 +67,7 @@ namespace mongo {
MONGO_FP_DECLARE(neverBalance);
- Grid::Grid()
- : _lock("Grid"),
- _allowLocalShard(true) {
-
- }
-
- Grid::~Grid() {
-
- }
+ Grid::Grid() : _allowLocalShard(true) {}
DBConfigPtr Grid::getDBConfig( StringData ns , bool create , const string& shardNameHint ) {
string database = nsToDatabase( ns );
@@ -87,7 +79,7 @@ namespace mongo {
str::stream() << "invalid database name: " << database,
NamespaceString::validDBName( database ) );
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
DBConfigPtr& dbConfig = _databases[database];
if( ! dbConfig ){
@@ -197,14 +189,14 @@ namespace mongo {
void Grid::removeDB( const std::string& database ) {
uassert( 10186 , "removeDB expects db name" , database.find( '.' ) == string::npos );
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
_databases.erase( database );
}
void Grid::removeDBIfExists( const DBConfig& database ) {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
map<string,DBConfigPtr>::iterator it = _databases.find( database.getName() );
if( it != _databases.end() && it->second.get() == &database ){
@@ -650,7 +642,7 @@ namespace mongo {
}
void Grid::flushConfig() {
- scoped_lock lk( _lock );
+ boost::lock_guard<boost::mutex> lk( _lock );
_databases.clear();
}
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 1543e354567..89822916a73 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -48,7 +48,6 @@ namespace mongo {
class Grid {
public:
Grid();
- ~Grid();
/**
     * gets the config for the db.
diff --git a/src/mongo/s/shard.cpp b/src/mongo/s/shard.cpp
index 6df1eeb915d..14804c9a0de 100644
--- a/src/mongo/s/shard.cpp
+++ b/src/mongo/s/shard.cpp
@@ -89,7 +89,6 @@ namespace mongo {
class StaticShardInfo {
public:
- StaticShardInfo() : _mutex("StaticShardInfo"), _rsMutex("RSNameMap") { }
void reload() {
list<BSONObj> all;
@@ -110,7 +109,7 @@ namespace mongo {
conn.done();
}
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
// We use the _lookup table for all shards and for the primary config DB. The config DB info,
// however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
@@ -152,7 +151,7 @@ namespace mongo {
}
ShardPtr findIfExists( const string& shardName ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
ShardMap::iterator i = _lookup.find( shardName );
if ( i != _lookup.end() ) return i->second;
return ShardPtr();
@@ -166,7 +165,7 @@ namespace mongo {
errmsg.empty());
if (connStr.type() == ConnectionString::SET) {
- scoped_lock lk(_rsMutex);
+ boost::lock_guard<boost::mutex> lk(_rsMutex);
ShardMap::iterator iter = _rsLookup.find(connStr.getSetName());
if (iter == _rsLookup.end()) {
@@ -176,7 +175,7 @@ namespace mongo {
return iter->second;
}
else {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
ShardMap::iterator iter = _lookup.find(ident);
if (iter == _lookup.end()) {
@@ -206,7 +205,7 @@ namespace mongo {
// Note: this doesn't refresh the table if the name isn't found, so it's possible that
// a newly added shard/Replica Set may not be found.
Shard lookupRSName( const string& name) {
- scoped_lock lk( _rsMutex );
+ boost::lock_guard<boost::mutex> lk( _rsMutex );
ShardMap::iterator i = _rsLookup.find( name );
return (i == _rsLookup.end()) ? Shard::EMPTY : *(i->second.get());
@@ -215,13 +214,13 @@ namespace mongo {
// Useful for ensuring our shard data will not be modified while we use it
Shard findCopy( const string& ident ){
ShardPtr found = findWithRetry(ident);
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
massert( 13128 , (string)"can't find shard for: " + ident , found.get() );
return *found.get();
}
void set( const string& name , const Shard& s , bool setName = true , bool setAddr = true ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
ShardPtr ss( new Shard( s ) );
if ( setName )
_lookup[name] = ss;
@@ -235,7 +234,7 @@ namespace mongo {
const ConnectionString& cs = s->getAddress();
if ( cs.type() == ConnectionString::SET ) {
if ( cs.getSetName().size() ) {
- scoped_lock lk( _rsMutex);
+ boost::lock_guard<boost::mutex> lk( _rsMutex);
_rsLookup[ cs.getSetName() ] = s;
}
vector<HostAndPort> servers = cs.getServers();
@@ -246,7 +245,7 @@ namespace mongo {
}
void remove( const string& name ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
for ( ShardMap::iterator i = _lookup.begin(); i!=_lookup.end(); ) {
ShardPtr s = i->second;
if ( s->getName() == name ) {
@@ -268,7 +267,7 @@ namespace mongo {
}
void getAllShards( vector<ShardPtr>& all ) const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
std::set<string> seen;
for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
const ShardPtr& s = i->second;
@@ -282,7 +281,7 @@ namespace mongo {
}
void getAllShards( vector<Shard>& all ) const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
std::set<string> seen;
for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
const ShardPtr& s = i->second;
@@ -297,7 +296,7 @@ namespace mongo {
bool isAShardNode( const string& addr ) const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
        // check direct nodes or set names
ShardMap::const_iterator i = _lookup.find( addr );
@@ -317,7 +316,7 @@ namespace mongo {
}
bool getShardMap( BSONObjBuilder& result , string& errmsg ) const {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
BSONObjBuilder b( _lookup.size() + 50 );
diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp
index 9396f12a8a3..708ca3efbe7 100644
--- a/src/mongo/s/version_manager.cpp
+++ b/src/mongo/s/version_manager.cpp
@@ -59,12 +59,8 @@ namespace mongo {
*/
struct ConnectionShardStatus {
- ConnectionShardStatus()
- : _mutex( "ConnectionShardStatus" ) {
- }
-
bool hasAnySequenceSet(DBClientBase* conn) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
@@ -74,7 +70,7 @@ namespace mongo {
const string& ns,
unsigned long long* sequence) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -89,12 +85,12 @@ namespace mongo {
}
void setSequence( DBClientBase * conn , const string& ns , const unsigned long long& s ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_map[conn->getConnectionId()][ns] = s;
}
void reset( DBClientBase * conn ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_map.erase( conn->getConnectionId() );
}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index b9403621652..3f56ee467b3 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -315,10 +315,8 @@ namespace {
namespace {
class ScopeCache {
public:
- ScopeCache() : _mutex("ScopeCache") {}
-
void release(const string& poolName, const boost::shared_ptr<Scope>& scope) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
if (scope->hasOutOfMemoryException()) {
// make some room
@@ -344,7 +342,7 @@ namespace {
}
boost::shared_ptr<Scope> tryAcquire(OperationContext* txn, const string& poolName) {
- scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
for (Pools::iterator it = _pools.begin(); it != _pools.end(); ++it) {
if (it->poolName == poolName) {
diff --git a/src/mongo/scripting/engine_v8-3.25.cpp b/src/mongo/scripting/engine_v8-3.25.cpp
index 877bc32308b..2cb47b814c1 100644
--- a/src/mongo/scripting/engine_v8-3.25.cpp
+++ b/src/mongo/scripting/engine_v8-3.25.cpp
@@ -394,7 +394,7 @@ namespace mongo {
}
void V8ScriptEngine::interrupt(unsigned opId) {
- mongo::mutex::scoped_lock intLock(_globalInterruptLock);
+ boost::lock_guard<boost::mutex> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -407,7 +407,7 @@ namespace mongo {
}
void V8ScriptEngine::interruptAll() {
- mongo::mutex::scoped_lock interruptLock(_globalInterruptLock);
+ boost::lock_guard<boost::mutex> interruptLock(_globalInterruptLock);
for (OpIdToScopeMap::iterator iScope = _opToScopeMap.begin();
iScope != _opToScopeMap.end(); ++iScope) {
iScope->second->kill();
@@ -415,7 +415,7 @@ namespace mongo {
}
void V8Scope::registerOperation(OperationContext* txn) {
- scoped_lock giLock(_engine->_globalInterruptLock);
+ boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
invariant(_opId == 0);
_opId = txn->getOpID();
_engine->_opToScopeMap[_opId] = this;
@@ -427,7 +427,7 @@ namespace mongo {
}
void V8Scope::unregisterOperation() {
- scoped_lock giLock(_engine->_globalInterruptLock);
+ boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
LOG(2) << "V8Scope " << static_cast<const void*>(this) << " unregistered for op "
<< _opId << endl;
if (_opId != 0) {
@@ -441,7 +441,7 @@ namespace mongo {
bool V8Scope::nativePrologue() {
v8::Locker l(_isolate);
- mongo::mutex::scoped_lock cbEnterLock(_interruptLock);
+ boost::lock_guard<boost::mutex> cbEnterLock(_interruptLock);
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: "
<< static_cast<const void*>(_isolate) << endl;
@@ -460,7 +460,7 @@ namespace mongo {
bool V8Scope::nativeEpilogue() {
v8::Locker l(_isolate);
- mongo::mutex::scoped_lock cbLeaveLock(_interruptLock);
+ boost::lock_guard<boost::mutex> cbLeaveLock(_interruptLock);
_inNativeExecution = false;
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: "
@@ -477,7 +477,7 @@ namespace mongo {
}
void V8Scope::kill() {
- mongo::mutex::scoped_lock interruptLock(_interruptLock);
+ boost::lock_guard<boost::mutex> interruptLock(_interruptLock);
if (!_inNativeExecution) {
// Set the TERMINATE flag on the stack guard for this isolate.
// This won't happen between calls to nativePrologue and nativeEpilogue().
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
index 3df613e4961..62c38f3bd8f 100644
--- a/src/mongo/scripting/engine_v8.cpp
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -351,7 +351,6 @@ namespace mongo {
}
V8ScriptEngine::V8ScriptEngine() :
- _globalInterruptLock("GlobalV8InterruptLock"),
_opToScopeMap(),
_deadlineMonitor() {
}
@@ -374,7 +373,7 @@ namespace mongo {
}
void V8ScriptEngine::interrupt(unsigned opId) {
- mongo::mutex::scoped_lock intLock(_globalInterruptLock);
+ boost::lock_guard<boost::mutex> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -387,7 +386,7 @@ namespace mongo {
}
void V8ScriptEngine::interruptAll() {
- mongo::mutex::scoped_lock interruptLock(_globalInterruptLock);
+ boost::lock_guard<boost::mutex> interruptLock(_globalInterruptLock);
for (OpIdToScopeMap::iterator iScope = _opToScopeMap.begin();
iScope != _opToScopeMap.end(); ++iScope) {
iScope->second->kill();
@@ -395,7 +394,7 @@ namespace mongo {
}
void V8Scope::registerOperation(OperationContext* txn) {
- scoped_lock giLock(_engine->_globalInterruptLock);
+ boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
invariant(_opId == 0);
_opId = txn->getOpID();
_engine->_opToScopeMap[_opId] = this;
@@ -407,7 +406,7 @@ namespace mongo {
}
void V8Scope::unregisterOperation() {
- scoped_lock giLock(_engine->_globalInterruptLock);
+ boost::lock_guard<boost::mutex> giLock(_engine->_globalInterruptLock);
LOG(2) << "V8Scope " << static_cast<const void*>(this) << " unregistered for op " << _opId << endl;
if (_opId != 0) {
// scope is currently associated with an operation id
@@ -420,7 +419,7 @@ namespace mongo {
bool V8Scope::nativePrologue() {
v8::Locker l(_isolate);
- mongo::mutex::scoped_lock cbEnterLock(_interruptLock);
+ boost::lock_guard<boost::mutex> cbEnterLock(_interruptLock);
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: " << static_cast<const void*>(_isolate) << endl;
return false;
@@ -437,7 +436,7 @@ namespace mongo {
bool V8Scope::nativeEpilogue() {
v8::Locker l(_isolate);
- mongo::mutex::scoped_lock cbLeaveLock(_interruptLock);
+ boost::lock_guard<boost::mutex> cbLeaveLock(_interruptLock);
_inNativeExecution = false;
if (v8::V8::IsExecutionTerminating(_isolate)) {
LOG(2) << "v8 execution interrupted. isolate: " << static_cast<const void*>(_isolate) << endl;
@@ -452,7 +451,7 @@ namespace mongo {
}
void V8Scope::kill() {
- mongo::mutex::scoped_lock interruptLock(_interruptLock);
+ boost::lock_guard<boost::mutex> interruptLock(_interruptLock);
if (!_inNativeExecution) {
// Set the TERMINATE flag on the stack guard for this isolate.
// This won't happen between calls to nativePrologue and nativeEpilogue().
@@ -492,7 +491,6 @@ namespace mongo {
: _engine(engine),
_connectState(NOT),
_cpuProfiler(),
- _interruptLock("ScopeInterruptLock"),
_inNativeExecution(true),
_pendingKill(false),
_opId(0),
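The two V8 engine files above carry the same interrupt protocol: _interruptLock guards the flag pair _inNativeExecution and _pendingKill, and kill() only sets V8's TERMINATE flag when the scope is not inside a native callback. A condensed sketch of that handshake, with std:: equivalents and illustrative names:

    #include <mutex>

    class InterruptibleScope {
    public:
        bool enterNative() {                 // cf. nativePrologue above
            std::lock_guard<std::mutex> lk(_interruptLock);
            if (_pendingKill)
                return false;                // killed before native code began
            _inNativeExecution = true;
            return true;
        }
        bool leaveNative() {                 // cf. nativeEpilogue above
            std::lock_guard<std::mutex> lk(_interruptLock);
            _inNativeExecution = false;
            return !_pendingKill;            // caller aborts the script if false
        }
        void kill() {
            std::lock_guard<std::mutex> lk(_interruptLock);
            _pendingKill = true;             // the real code also sets V8's
                                             // TERMINATE flag when not in native code
        }
    private:
        std::mutex _interruptLock;
        bool _inNativeExecution = false;
        bool _pendingKill = false;
    };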
diff --git a/src/mongo/scripting/v8_deadline_monitor.h b/src/mongo/scripting/v8_deadline_monitor.h
index 45913e65b48..1d892ed8752 100644
--- a/src/mongo/scripting/v8_deadline_monitor.h
+++ b/src/mongo/scripting/v8_deadline_monitor.h
@@ -67,7 +67,6 @@ namespace mongo {
public:
DeadlineMonitor() :
_tasks(),
- _deadlineMutex("DeadlineMonitor"),
_newDeadlineAvailable(),
_nearestDeadlineWallclock(kMaxDeadline),
_monitorThread(&mongo::DeadlineMonitor<_Task>::deadlineMonitorThread, this) {
@@ -88,7 +87,7 @@ namespace mongo {
*/
void startDeadline(_Task* const task, uint64_t timeoutMs) {
uint64_t now = curTimeMillis64();
- scoped_lock lk(_deadlineMutex);
+ boost::lock_guard<boost::mutex> lk(_deadlineMutex);
// insert or update the deadline
std::pair<typename TaskDeadlineMap::iterator, bool> inserted =
@@ -110,7 +109,7 @@ namespace mongo {
* @return true if the task was found and erased
*/
bool stopDeadline(_Task* const task) {
- scoped_lock lk(_deadlineMutex);
+ boost::lock_guard<boost::mutex> lk(_deadlineMutex);
return _tasks.erase(task);
}
@@ -121,7 +120,7 @@ namespace mongo {
* _Task::kill() is invoked.
*/
void deadlineMonitorThread() {
- scoped_lock lk(_deadlineMutex);
+ boost::unique_lock<boost::mutex> lk(_deadlineMutex);
while (true) {
// get the next interval to wait
@@ -131,11 +130,11 @@ namespace mongo {
while (_nearestDeadlineWallclock > now) {
uint64_t nearestDeadlineMs;
if (_nearestDeadlineWallclock == kMaxDeadline) {
- _newDeadlineAvailable.wait(lk.boost());
+ _newDeadlineAvailable.wait(lk);
}
else {
nearestDeadlineMs = _nearestDeadlineWallclock - now;
- _newDeadlineAvailable.timed_wait(lk.boost(),
+ _newDeadlineAvailable.timed_wait(lk,
boost::posix_time::milliseconds(nearestDeadlineMs));
}
now = curTimeMillis64();
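The monitor thread above is the one place in this header that needs boost::unique_lock: it sleeps indefinitely when no deadline is scheduled and otherwise sleeps only until the nearest deadline, waking early whenever startDeadline() signals _newDeadlineAvailable. The same loop body with std:: equivalents (the clock source and the kill step are elided):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    const std::uint64_t kMaxDeadline = ~std::uint64_t(0);

    std::mutex deadlineMutex;
    std::condition_variable newDeadlineAvailable;
    std::uint64_t nearestDeadlineWallclock = kMaxDeadline;  // ms since epoch

    // One pass of the monitor loop; nowMs is the current wall clock.
    void monitorOnce(std::uint64_t nowMs) {
        std::unique_lock<std::mutex> lk(deadlineMutex);
        if (nearestDeadlineWallclock == kMaxDeadline) {
            newDeadlineAvailable.wait(lk);            // nothing scheduled: sleep
        } else if (nearestDeadlineWallclock > nowMs) {
            newDeadlineAvailable.wait_for(            // sleep until next deadline
                lk, std::chrono::milliseconds(nearestDeadlineWallclock - nowMs));
        }
        // on wakeup: kill every task whose deadline has passed
    }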
diff --git a/src/mongo/scripting/v8_deadline_monitor_test.cpp b/src/mongo/scripting/v8_deadline_monitor_test.cpp
index 1ad95d63816..f6f43bd8ff4 100644
--- a/src/mongo/scripting/v8_deadline_monitor_test.cpp
+++ b/src/mongo/scripting/v8_deadline_monitor_test.cpp
@@ -43,18 +43,18 @@ namespace mongo {
class TaskGroup {
public:
- TaskGroup() : _m("TestGroup"), _c(), _killCount(0), _targetKillCount(0) { }
+ TaskGroup() : _c(), _killCount(0), _targetKillCount(0) { }
void noteKill() {
- scoped_lock lk(_m);
+ boost::lock_guard<boost::mutex> lk(_m);
++_killCount;
if (_killCount >= _targetKillCount)
_c.notify_one();
}
void waitForKillCount(uint64_t target) {
- scoped_lock lk(_m);
+ boost::unique_lock<boost::mutex> lk(_m);
_targetKillCount = target;
while (_killCount < _targetKillCount)
- _c.wait(lk.boost());
+ _c.wait(lk);
}
private:
mongo::mutex _m;
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index 3e6195ff78a..77c807ae908 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -185,7 +185,7 @@ namespace mongo {
void exitCleanly(ExitCode code) {
{
- mongo::mutex::scoped_lock lk(mongo::shell_utils::mongoProgramOutputMutex);
+ boost::lock_guard<boost::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
mongo::dbexitCalled = true;
}
@@ -896,7 +896,7 @@ int _main( int argc, char* argv[], char **envp ) {
}
{
- mongo::mutex::scoped_lock lk(mongo::shell_utils::mongoProgramOutputMutex);
+ boost::lock_guard<boost::mutex> lk(mongo::shell_utils::mongoProgramOutputMutex);
mongo::dbexitCalled = true;
}
return 0;
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index ee489a75dd4..77cf1103f1d 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -286,22 +286,20 @@ namespace mongo {
return _confirmed = matchedY;
}
- ConnectionRegistry::ConnectionRegistry() :
- _mutex( "connectionRegistryMutex" ) {
- }
-
+ ConnectionRegistry::ConnectionRegistry() = default;
+
void ConnectionRegistry::registerConnection( DBClientWithCommands &client ) {
BSONObj info;
if ( client.runCommand( "admin", BSON( "whatsmyuri" << 1 ), info ) ) {
string connstr = dynamic_cast<DBClientBase&>( client ).getServerAddress();
- mongo::mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_connectionUris[ connstr ].insert( info[ "you" ].str() );
}
}
void ConnectionRegistry::killOperationsOnAllConnections( bool withPrompt ) const {
Prompter prompter( "do you want to kill the current op(s) on the server?" );
- mongo::mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
for( map<string,set<string> >::const_iterator i = _connectionUris.begin();
i != _connectionUris.end(); ++i ) {
string errmsg;
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 48ff2dd3c36..07d1828eaf5 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -169,12 +169,12 @@ namespace mongo {
ProgramRegistry &registry = *( new ProgramRegistry() );
void goingAwaySoon() {
- mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
mongo::dbexitCalled = true;
}
void ProgramOutputMultiplexer::appendLine( int port, ProcessId pid, const char *line ) {
- mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
if( mongo::dbexitCalled ) throw "program is terminating";
stringstream buf;
if ( port > 0 )
@@ -187,7 +187,7 @@ namespace mongo {
}
string ProgramOutputMultiplexer::str() const {
- mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
string ret = _buffer.str();
size_t len = ret.length();
if ( len > 100000 ) {
@@ -197,7 +197,7 @@ namespace mongo {
}
void ProgramOutputMultiplexer::clear() {
- mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ boost::lock_guard<boost::mutex> lk( mongoProgramOutputMutex );
_buffer.str( "" );
}
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index 090b665ddd4..c98e674ccc8 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -34,6 +34,7 @@
#include "mongo/util/background.h"
#include <boost/thread/condition.hpp>
+#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>
#include <boost/thread/thread.hpp>
@@ -56,9 +57,7 @@ namespace mongo {
class PeriodicTaskRunner : public BackgroundJob {
public:
- PeriodicTaskRunner()
- : _mutex("PeriodicTaskRunner")
- , _shutdownRequested(false) {}
+ PeriodicTaskRunner() : _shutdownRequested(false) {}
void add( PeriodicTask* task );
void remove( PeriodicTask* task );
@@ -85,7 +84,7 @@ namespace mongo {
void _runTask( PeriodicTask* task );
// _mutex protects the _shutdownRequested flag and the _tasks vector.
- mongo::mutex _mutex;
+ boost::mutex _mutex;
// The condition variable is used to sleep for the interval between task
// executions, and is notified when the _shutdownRequested flag is toggled.
@@ -133,12 +132,9 @@ namespace mongo {
// both the BackgroundJob and the internal thread point to JobStatus
struct BackgroundJob::JobStatus {
- JobStatus()
- : mutex( "backgroundJob" )
- , state( NotStarted ) {
- }
+ JobStatus() : state(NotStarted) {}
- mongo::mutex mutex;
+ boost::mutex mutex;
boost::condition done;
State state;
};
@@ -182,7 +178,7 @@ namespace mongo {
{
// It is illegal to access any state owned by this BackgroundJob after leaving this
// scope, with the exception of the call to 'delete this' below.
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
_status->state = Done;
_status->done.notify_all();
}
@@ -192,7 +188,7 @@ namespace mongo {
}
void BackgroundJob::go() {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
massert( 17234, mongoutils::str::stream()
<< "backgroundJob already running: " << name(),
_status->state != Running );
@@ -206,7 +202,7 @@ namespace mongo {
}
Status BackgroundJob::cancel() {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
if ( _status->state == Running )
return Status( ErrorCodes::IllegalOperation,
@@ -222,27 +218,27 @@ namespace mongo {
bool BackgroundJob::wait( unsigned msTimeOut ) {
verify( !_selfDelete ); // you cannot call wait on a self-deleting job
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
while ( _status->state != Done ) {
if ( msTimeOut ) {
boost::xtime deadline = incxtimemillis( msTimeOut );
- if ( !_status->done.timed_wait( l.boost() , deadline ) )
+ if ( !_status->done.timed_wait( l , deadline ) )
return false;
}
else {
- _status->done.wait( l.boost() );
+ _status->done.wait( l );
}
}
return true;
}
BackgroundJob::State BackgroundJob::getState() const {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
return _status->state;
}
bool BackgroundJob::running() const {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
return _status->state == Running;
}
@@ -297,12 +293,12 @@ namespace mongo {
}
void PeriodicTaskRunner::add( PeriodicTask* task ) {
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
_tasks.push_back( task );
}
void PeriodicTaskRunner::remove( PeriodicTask* task ) {
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
for ( size_t i = 0; i != _tasks.size(); i++ ) {
if ( _tasks[i] == task ) {
_tasks[i] = NULL;
@@ -313,7 +309,7 @@ namespace mongo {
Status PeriodicTaskRunner::stop( int gracePeriodMillis ) {
{
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
_shutdownRequested = true;
_cond.notify_one();
}
@@ -332,10 +328,10 @@ namespace mongo {
const stdx::function<bool()> predicate =
stdx::bind( &PeriodicTaskRunner::_isShutdownRequested, this );
- mutex::scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
while ( !predicate() ) {
const boost::xtime deadline = incxtimemillis( waitMillis );
- if ( !_cond.timed_wait( lock.boost(), deadline, predicate ) )
+ if ( !_cond.timed_wait( lock, deadline, predicate ) )
_runTasks();
}
}
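The last hunk above uses the predicate overload of timed_wait, which re-evaluates _isShutdownRequested on every wakeup, so a spurious wakeup cannot run the tasks early. A sketch with std:: equivalents, where wait_for with a predicate plays the role of timed_wait against a boost::xtime deadline:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cond;
    bool shutdownRequested = false;

    // Returns true on shutdown, false when the interval elapsed (run the tasks).
    bool sleepForInterval(std::chrono::milliseconds waitMs) {
        std::unique_lock<std::mutex> lock(m);
        return cond.wait_for(lock, waitMs, [] { return shutdownRequested; });
    }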
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index 030c0f3120e..1b2f197afcc 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -104,9 +104,7 @@ namespace {
class Job : public BackgroundJob {
public:
- Job()
- : _mutex("BackgroundJobLifeCycle::Go")
- , _hasRun(false) {}
+ Job() : _hasRun(false) {}
virtual std::string name() const {
return "BackgroundLifeCycle::CannotCallGoAgain";
@@ -114,7 +112,7 @@ namespace {
virtual void run() {
{
- mongo::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
ASSERT_FALSE( _hasRun );
_hasRun = true;
}
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index 67eb565c37d..5e207e7662b 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -75,45 +75,7 @@ namespace mongo {
~StaticObserver() { _destroyingStatics = true; }
};
- /** On pthread systems, it is an error to destroy a mutex while held (boost mutex
- * may use pthread). Static global mutexes may be held upon shutdown in our
- * implementation, and this way we avoid destroying them.
- * NOT recursive.
- */
- class mutex : boost::noncopyable {
- public:
- const char * const _name;
- // NOINLINE so that 'mutex::mutex' is always in the frame, this makes
- // it easier for us to suppress the leaks caused by the static observer.
- NOINLINE_DECL mutex(const char *name) : _name(name)
- {
- _m = new boost::timed_mutex();
- IGNORE_OBJECT( _m ); // Turn-off heap checking on _m
- }
- ~mutex() {
- if( !StaticObserver::_destroyingStatics ) {
- UNIGNORE_OBJECT( _m );
- delete _m;
- }
- }
-
- class scoped_lock : boost::noncopyable {
- public:
- scoped_lock( mongo::mutex &m ) :
- _l( m.boost() ) {
- }
- ~scoped_lock() {
- }
- boost::unique_lock<boost::timed_mutex>& boost() { return _l; }
- private:
- boost::unique_lock<boost::timed_mutex> _l;
- };
- private:
- boost::timed_mutex &boost() { return *_m; }
- boost::timed_mutex *_m;
- };
-
- typedef mongo::mutex::scoped_lock scoped_lock;
+ using mutex = boost::mutex;
/** The concept with SimpleMutex is that it is a basic lock/unlock with no
special functionality (such as try and try timeout). Thus it can be
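This hunk is the heart of the commit: the hand-rolled mongo::mutex wrapper over boost::timed_mutex, together with its scoped_lock, disappears, leaving mongo::mutex as a plain alias for boost::mutex. Every other hunk then follows one rule, sketched here with std:: equivalents:

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    void producer() {
        std::lock_guard<std::mutex> lk(m);   // plain critical section: lock_guard
        ready = true;
        cv.notify_one();
    }

    void consumer() {
        std::unique_lock<std::mutex> lk(m);  // cv.wait must unlock/relock: unique_lock
        cv.wait(lk, [] { return ready; });
    }

lock_guard is the default; unique_lock appears exactly where a condition variable or an early unlock is involved.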
diff --git a/src/mongo/util/concurrency/synchronization.cpp b/src/mongo/util/concurrency/synchronization.cpp
index 3121989123a..d7cf3575c32 100644
--- a/src/mongo/util/concurrency/synchronization.cpp
+++ b/src/mongo/util/concurrency/synchronization.cpp
@@ -59,20 +59,20 @@ namespace {
}
}
- Notification::Notification() : _mutex ( "Notification" ) {
+ Notification::Notification() {
lookFor = 1;
cur = 0;
}
void Notification::waitToBeNotified() {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
while ( lookFor != cur )
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
lookFor++;
}
void Notification::notifyOne() {
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
verify( cur != lookFor );
cur++;
_condition.notify_one();
@@ -80,36 +80,36 @@ namespace {
/* --- NotifyAll --- */
- NotifyAll::NotifyAll() : _mutex("NotifyAll") {
+ NotifyAll::NotifyAll() {
_lastDone = 0;
_lastReturned = 0;
_nWaiting = 0;
}
NotifyAll::When NotifyAll::now() {
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
return ++_lastReturned;
}
void NotifyAll::waitFor(When e) {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
++_nWaiting;
while( _lastDone < e ) {
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
}
}
void NotifyAll::awaitBeyondNow() {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
++_nWaiting;
When e = ++_lastReturned;
while( _lastDone <= e ) {
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
}
}
void NotifyAll::notifyAll(When e) {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
_lastDone = e;
_nWaiting = 0;
_condition.notify_all();
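Notification and NotifyAll above are thin event primitives over one mutex and one condition variable. A minimal Notification with std:: equivalents (the original counts generations via lookFor/cur so it can be reused; a single boolean suffices for the one-shot case sketched here):

    #include <condition_variable>
    #include <mutex>

    class Notification {
    public:
        void waitToBeNotified() {
            std::unique_lock<std::mutex> lock(_mutex);
            _condition.wait(lock, [this] { return _notified; });
        }
        void notifyOne() {
            std::lock_guard<std::mutex> lock(_mutex);
            _notified = true;
            _condition.notify_one();
        }
    private:
        std::mutex _mutex;
        std::condition_variable _condition;
        bool _notified = false;
    };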
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 2f16d4e5fa4..3056294e163 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -102,7 +102,7 @@ namespace mongo {
};
ThreadPool::ThreadPool(int nThreads, const std::string& threadNamePrefix)
- : _mutex("ThreadPool"), _tasksRemaining(0)
+ : _tasksRemaining(0)
, _nThreads(nThreads)
, _threadNamePrefix(threadNamePrefix) {
startThreads();
@@ -111,13 +111,13 @@ namespace mongo {
ThreadPool::ThreadPool(const DoNotStartThreadsTag&,
int nThreads,
const std::string& threadNamePrefix)
- : _mutex("ThreadPool"), _tasksRemaining(0)
+ : _tasksRemaining(0)
, _nThreads(nThreads)
, _threadNamePrefix(threadNamePrefix) {
}
void ThreadPool::startThreads() {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
for (int i = 0; i < _nThreads; ++i) {
const std::string threadName(_threadNamePrefix.empty() ?
_threadNamePrefix :
@@ -145,14 +145,14 @@ namespace mongo {
}
void ThreadPool::join() {
- scoped_lock lock(_mutex);
+ boost::unique_lock<boost::mutex> lock(_mutex);
while(_tasksRemaining) {
- _condition.wait(lock.boost());
+ _condition.wait(lock);
}
}
void ThreadPool::schedule(Task task) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
_tasksRemaining++;
@@ -167,7 +167,7 @@ namespace mongo {
// should only be called by a worker from the worker thread
void ThreadPool::task_done(Worker* worker) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
if (!_tasks.empty()) {
worker->set_task(_tasks.front());
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index efec9b50ee2..2bda04c5c10 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -122,38 +122,33 @@ namespace mongo {
#else
- TicketHolder::TicketHolder( int num )
- : _outof(num),
- _num(num),
- _mutex("TicketHolder") {
- }
+ TicketHolder::TicketHolder( int num ) : _outof(num), _num(num) {}
- TicketHolder::~TicketHolder(){
- }
+ TicketHolder::~TicketHolder() = default;
bool TicketHolder::tryAcquire() {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
return _tryAcquire();
}
void TicketHolder::waitForTicket() {
- scoped_lock lk( _mutex );
+ boost::unique_lock<boost::mutex> lk( _mutex );
while( ! _tryAcquire() ) {
- _newTicket.wait( lk.boost() );
+ _newTicket.wait( lk );
}
}
void TicketHolder::release() {
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_num++;
}
_newTicket.notify_one();
}
Status TicketHolder::resize( int newSize ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
int used = _outof.load() - _num;
if ( used > newSize ) {
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index a1d00b7830f..bbeb9cf00c9 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -80,12 +80,7 @@ namespace {
failPointPrng.getMake()->resetSeed(seed);
}
- FailPoint::FailPoint():
- _fpInfo(0),
- _mode(off),
- _timesOrPeriod(0),
- _modMutex("failPointMutex") {
- }
+ FailPoint::FailPoint() : _fpInfo(0), _mode(off), _timesOrPeriod(0) {}
void FailPoint::shouldFailCloseBlock() {
_fpInfo.subtractAndFetch(1);
@@ -100,7 +95,7 @@ namespace {
* 3. Sets the new mode.
*/
- scoped_lock scoped(_modMutex);
+ boost::lock_guard<boost::mutex> scoped(_modMutex);
// Step 1
disableFailPoint();
@@ -193,7 +188,7 @@ namespace {
BSONObj FailPoint::toBSON() const {
BSONObjBuilder builder;
- scoped_lock scoped(_modMutex);
+ boost::lock_guard<boost::mutex> scoped(_modMutex);
builder.append("mode", _mode);
builder.append("data", _data);
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index e2ff5089fd7..6ca1df82e14 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -28,10 +28,11 @@
#pragma once
+#include <boost/thread/mutex.hpp>
+
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/util/concurrency/mutex.h"
namespace mongo {
/**
@@ -157,7 +158,7 @@ namespace mongo {
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable mutex _modMutex;
+ mutable boost::mutex _modMutex;
/**
* Enables this fail point.
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index cf60e1cdadb..5acf3d68cb2 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -117,9 +117,7 @@ namespace mongo {
return parent;
}
- FileAllocator::FileAllocator()
- : _pendingMutex("FileAllocator"), _failed() {
- }
+ FileAllocator::FileAllocator() : _failed() {}
void FileAllocator::start() {
@@ -127,7 +125,7 @@ namespace mongo {
}
void FileAllocator::requestAllocation( const string &name, long &size ) {
- scoped_lock lk( _pendingMutex );
+ boost::lock_guard<boost::mutex> lk( _pendingMutex );
if ( _failed )
return;
long oldSize = prevSize( name );
@@ -141,7 +139,7 @@ namespace mongo {
}
void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
- scoped_lock lk( _pendingMutex );
+ boost::unique_lock<boost::mutex> lk( _pendingMutex );
// In case the allocator is in failed state, check once before starting so that subsequent
// requests for the same database would fail fast after the first one has failed.
@@ -166,7 +164,7 @@ namespace mongo {
_pendingUpdated.notify_all();
while( inProgress( name ) ) {
checkFailure();
- _pendingUpdated.wait( lk.boost() );
+ _pendingUpdated.wait(lk);
}
}
@@ -174,9 +172,9 @@ namespace mongo {
void FileAllocator::waitUntilFinished() const {
if ( _failed )
return;
- scoped_lock lk( _pendingMutex );
+ boost::unique_lock<boost::mutex> lk( _pendingMutex );
while( _pending.size() != 0 )
- _pendingUpdated.wait( lk.boost() );
+ _pendingUpdated.wait(lk);
}
// TODO: pull this out to per-OS files once they exist
@@ -361,15 +359,15 @@ namespace mongo {
}
while( 1 ) {
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::unique_lock<boost::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
- fa->_pendingUpdated.wait( lk.boost() );
+ fa->_pendingUpdated.wait(lk);
}
while( 1 ) {
string name;
long size = 0;
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
break;
name = fa->_pending.front();
@@ -441,7 +439,7 @@ namespace mongo {
}
{
- scoped_lock lk(fa->_pendingMutex);
+ boost::lock_guard<boost::mutex> lk(fa->_pendingMutex);
fa->_failed = true;
// TODO: Should we remove the file from pending?
@@ -454,7 +452,7 @@ namespace mongo {
}
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
fa->_pendingSize.erase( name );
fa->_pending.pop_front();
fa->_pendingUpdated.notify_all();
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
index 354ca4df62c..86e6c1e6b6b 100644
--- a/src/mongo/util/mmap_win.cpp
+++ b/src/mongo/util/mmap_win.cpp
@@ -69,7 +69,7 @@ namespace mongo {
// 2. Prevents calls to VirtualProtect while we remapping files.
// Lock Ordering:
        // - If taken, must be after privateViews._m to prevent deadlocks
- mutex mapViewMutex("mapView");
+ mutex mapViewMutex;
MAdvise::MAdvise(void *,unsigned, Advice) { }
MAdvise::~MAdvise() { }
@@ -165,7 +165,7 @@ namespace mongo {
boost::lock_guard<boost::mutex> lk(_flushMutex);
{
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
UnmapViewOfFile(*i);
@@ -187,7 +187,7 @@ namespace mongo {
void* MemoryMappedFile::createReadOnlyMap() {
verify( maphandle );
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
void* readOnlyMapAddress = NULL;
int current_retry = 0;
@@ -299,7 +299,7 @@ namespace mongo {
void *view = 0;
{
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
int current_retry = 0;
@@ -364,7 +364,7 @@ namespace mongo {
void* MemoryMappedFile::createPrivateMap() {
verify( maphandle );
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
LPVOID thisAddress = getNextMemoryMappedFileLocation( len );
@@ -412,7 +412,7 @@ namespace mongo {
privateViews.clearWritableBits(oldPrivateAddr, len);
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD dosError = GetLastError();
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index f260573039e..8841be6a787 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -643,7 +643,7 @@ namespace mongo {
std::set<std::string>* paths;
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
sockets = _sockets;
_sockets = new std::set<int>();
paths = _socketPaths;
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
index 6efb3a717b3..481a646239f 100644
--- a/src/mongo/util/net/listen.h
+++ b/src/mongo/util/net/listen.h
@@ -142,26 +142,25 @@ namespace mongo {
class ListeningSockets {
public:
ListeningSockets()
- : _mutex("ListeningSockets")
- , _sockets( new std::set<int>() )
+ : _sockets( new std::set<int>() )
, _socketPaths( new std::set<std::string>() )
{ }
void add( int sock ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_sockets->insert( sock );
}
void addPath( const std::string& path ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_socketPaths->insert( path );
}
void remove( int sock ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_sockets->erase( sock );
}
void closeAll();
static ListeningSockets* get();
private:
- mongo::mutex _mutex;
+ boost::mutex _mutex;
std::set<int>* _sockets;
std::set<std::string>* _socketPaths; // for unix domain sockets
static ListeningSockets* _instance;
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index 1ab8355dc05..3b6e3204417 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -115,9 +115,9 @@ namespace mongo {
std::set<MessagingPort*> ports;
mongo::mutex m;
public:
- Ports() : ports(), m("Ports") {}
+ Ports() : ports() {}
void closeAll(unsigned skip_mask) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
for ( std::set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
if( (*i)->tag & skip_mask )
continue;
@@ -125,11 +125,11 @@ namespace mongo {
}
}
void insert(MessagingPort* p) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
ports.insert(p);
}
void erase(MessagingPort* p) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
ports.erase(p);
}
};
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 07ba57e87ab..7ae46f97325 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -56,26 +56,23 @@ namespace mongo {
typedef size_t (*getSizeFunc)(const T& t);
public:
BlockingQueue() :
- _lock("BlockingQueue"),
_maxSize(std::numeric_limits<std::size_t>::max()),
_currentSize(0),
_getSize(&_getSizeDefault) {}
BlockingQueue(size_t size) :
- _lock("BlockingQueue(bounded)"),
_maxSize(size),
_currentSize(0),
_getSize(&_getSizeDefault) {}
BlockingQueue(size_t size, getSizeFunc f) :
- _lock("BlockingQueue(custom size)"),
_maxSize(size),
_currentSize(0),
_getSize(f) {}
void push(T const& t) {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
size_t tSize = _getSize(t);
while (_currentSize + tSize > _maxSize) {
- _cvNoLongerFull.wait( l.boost() );
+ _cvNoLongerFull.wait( l );
}
_queue.push( t );
_currentSize += tSize;
@@ -83,7 +80,7 @@ namespace mongo {
}
bool empty() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _queue.empty();
}
@@ -91,7 +88,7 @@ namespace mongo {
* The size as measured by the size function. Default to counting each item
*/
size_t size() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _currentSize;
}
@@ -106,19 +103,19 @@ namespace mongo {
* The number/count of items in the queue ( _queue.size() )
*/
size_t count() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _queue.size();
}
void clear() {
- scoped_lock l(_lock);
+ boost::lock_guard<boost::mutex> l(_lock);
_queue = std::queue<T>();
_currentSize = 0;
_cvNoLongerFull.notify_one();
}
bool tryPop( T & t ) {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
if ( _queue.empty() )
return false;
@@ -132,9 +129,9 @@ namespace mongo {
T blockingPop() {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() )
- _cvNoLongerEmpty.wait( l.boost() );
+ _cvNoLongerEmpty.wait( l );
T t = _queue.front();
_queue.pop();
@@ -158,9 +155,9 @@ namespace mongo {
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() ) {
- if ( ! _cvNoLongerEmpty.timed_wait( l.boost() , xt ) )
+ if ( ! _cvNoLongerEmpty.timed_wait( l , xt ) )
return false;
}
@@ -180,9 +177,9 @@ namespace mongo {
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() ) {
- if ( ! _cvNoLongerEmpty.timed_wait( l.boost() , xt ) )
+ if ( ! _cvNoLongerEmpty.timed_wait( l , xt ) )
return false;
}
@@ -194,7 +191,7 @@ namespace mongo {
// only one consumer
bool peek(T& t) {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
if (_queue.empty()) {
return false;
}
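queue.h shows the cleanest split between the two lock types: lock_guard for the simple accessors, unique_lock wherever _cvNoLongerFull or _cvNoLongerEmpty waits. For reference, a compact bounded queue in the same shape, using std:: equivalents and a per-item count instead of the pluggable size function:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <queue>

    template <typename T>
    class BoundedQueue {
    public:
        explicit BoundedQueue(std::size_t maxSize) : _maxSize(maxSize) {}

        void push(const T& t) {
            std::unique_lock<std::mutex> l(_lock);
            _cvNotFull.wait(l, [this] { return _queue.size() < _maxSize; });
            _queue.push(t);
            _cvNotEmpty.notify_one();
        }

        T blockingPop() {
            std::unique_lock<std::mutex> l(_lock);
            _cvNotEmpty.wait(l, [this] { return !_queue.empty(); });
            T t = _queue.front();
            _queue.pop();
            _cvNotFull.notify_one();
            return t;
        }

    private:
        std::mutex _lock;
        std::condition_variable _cvNotFull;
        std::condition_variable _cvNotEmpty;
        const std::size_t _maxSize;
        std::queue<T> _queue;
    };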