author      Andy Schwerin <schwerin@mongodb.com>    2015-02-27 18:33:28 -0500
committer   Andy Schwerin <schwerin@mongodb.com>    2015-03-12 17:11:20 -0400
commit      7cd9cf303c824478f0f6d60cadfcc1a25bdb21f2 (patch)
tree        ebeec8c3c2dc21359b941d95e2109e355542e65f /src/mongo/util
parent      7ee3d124070db157181bc1b24f2b84913957c388 (diff)
download    mongo-7cd9cf303c824478f0f6d60cadfcc1a25bdb21f2.tar.gz
SERVER-17310 Make mongo::mutex a typedef of boost::mutex and remove mongo::scoped_lock.
Diffstat (limited to 'src/mongo/util')
-rw-r--r--  src/mongo/util/background.cpp                  | 40
-rw-r--r--  src/mongo/util/background_job_test.cpp         |  6
-rw-r--r--  src/mongo/util/concurrency/mutex.h             | 40
-rw-r--r--  src/mongo/util/concurrency/synchronization.cpp | 22
-rw-r--r--  src/mongo/util/concurrency/thread_pool.cpp     | 14
-rw-r--r--  src/mongo/util/concurrency/ticketholder.cpp    | 19
-rw-r--r--  src/mongo/util/fail_point.cpp                  | 11
-rw-r--r--  src/mongo/util/fail_point.h                    |  5
-rw-r--r--  src/mongo/util/file_allocator.cpp              | 24
-rw-r--r--  src/mongo/util/mmap_win.cpp                    | 12
-rw-r--r--  src/mongo/util/net/listen.cpp                  |  2
-rw-r--r--  src/mongo/util/net/listen.h                    | 11
-rw-r--r--  src/mongo/util/net/message_port.cpp            |  8
-rw-r--r--  src/mongo/util/queue.h                         | 31
14 files changed, 93 insertions, 152 deletions
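
The change is mechanical across every file below: the old mongo::mutex wrapper (constructed with a name string and locked through mongo::scoped_lock, whose .boost() accessor had to be handed to condition waits) becomes boost::mutex used directly, with boost::lock_guard for plain critical sections and boost::unique_lock wherever a condition variable must release the lock while waiting. A minimal sketch of the resulting shape, assuming only boost.thread; the Counter class and its method names are illustrative and not code from this tree:

    #include <boost/thread/condition.hpp>
    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>

    class Counter {
    public:
        Counter() : _value(0) {}        // no "name" argument: boost::mutex default-constructs

        void increment() {
            boost::lock_guard<boost::mutex> lk(_mutex);    // plain critical section
            ++_value;
            _cond.notify_all();
        }

        void waitForAtLeast(int target) {
            boost::unique_lock<boost::mutex> lk(_mutex);   // unique_lock so the wait can unlock/relock
            while (_value < target)
                _cond.wait(lk);                            // no more lk.boost()
        }

    private:
        boost::mutex _mutex;
        boost::condition _cond;
        int _value;
    };
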
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index 090b665ddd4..c98e674ccc8 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -34,6 +34,7 @@
#include "mongo/util/background.h"
#include <boost/thread/condition.hpp>
+#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>
#include <boost/thread/thread.hpp>
@@ -56,9 +57,7 @@ namespace mongo {
class PeriodicTaskRunner : public BackgroundJob {
public:
- PeriodicTaskRunner()
- : _mutex("PeriodicTaskRunner")
- , _shutdownRequested(false) {}
+ PeriodicTaskRunner() : _shutdownRequested(false) {}
void add( PeriodicTask* task );
void remove( PeriodicTask* task );
@@ -85,7 +84,7 @@ namespace mongo {
void _runTask( PeriodicTask* task );
// _mutex protects the _shutdownRequested flag and the _tasks vector.
- mongo::mutex _mutex;
+ boost::mutex _mutex;
// The condition variable is used to sleep for the interval between task
// executions, and is notified when the _shutdownRequested flag is toggled.
@@ -133,12 +132,9 @@ namespace mongo {
// both the BackgroundJob and the internal thread point to JobStatus
struct BackgroundJob::JobStatus {
- JobStatus()
- : mutex( "backgroundJob" )
- , state( NotStarted ) {
- }
+ JobStatus() : state(NotStarted) {}
- mongo::mutex mutex;
+ boost::mutex mutex;
boost::condition done;
State state;
};
@@ -182,7 +178,7 @@ namespace mongo {
{
// It is illegal to access any state owned by this BackgroundJob after leaving this
// scope, with the exception of the call to 'delete this' below.
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
_status->state = Done;
_status->done.notify_all();
}
@@ -192,7 +188,7 @@ namespace mongo {
}
void BackgroundJob::go() {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
massert( 17234, mongoutils::str::stream()
<< "backgroundJob already running: " << name(),
_status->state != Running );
@@ -206,7 +202,7 @@ namespace mongo {
}
Status BackgroundJob::cancel() {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
if ( _status->state == Running )
return Status( ErrorCodes::IllegalOperation,
@@ -222,27 +218,27 @@ namespace mongo {
bool BackgroundJob::wait( unsigned msTimeOut ) {
verify( !_selfDelete ); // you cannot call wait on a self-deleting job
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
while ( _status->state != Done ) {
if ( msTimeOut ) {
boost::xtime deadline = incxtimemillis( msTimeOut );
- if ( !_status->done.timed_wait( l.boost() , deadline ) )
+ if ( !_status->done.timed_wait( l , deadline ) )
return false;
}
else {
- _status->done.wait( l.boost() );
+ _status->done.wait( l );
}
}
return true;
}
BackgroundJob::State BackgroundJob::getState() const {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
return _status->state;
}
bool BackgroundJob::running() const {
- scoped_lock l( _status->mutex );
+ boost::unique_lock<boost::mutex> l( _status->mutex );
return _status->state == Running;
}
@@ -297,12 +293,12 @@ namespace mongo {
}
void PeriodicTaskRunner::add( PeriodicTask* task ) {
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
_tasks.push_back( task );
}
void PeriodicTaskRunner::remove( PeriodicTask* task ) {
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
for ( size_t i = 0; i != _tasks.size(); i++ ) {
if ( _tasks[i] == task ) {
_tasks[i] = NULL;
@@ -313,7 +309,7 @@ namespace mongo {
Status PeriodicTaskRunner::stop( int gracePeriodMillis ) {
{
- mutex::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
_shutdownRequested = true;
_cond.notify_one();
}
@@ -332,10 +328,10 @@ namespace mongo {
const stdx::function<bool()> predicate =
stdx::bind( &PeriodicTaskRunner::_isShutdownRequested, this );
- mutex::scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
while ( !predicate() ) {
const boost::xtime deadline = incxtimemillis( waitMillis );
- if ( !_cond.timed_wait( lock.boost(), deadline, predicate ) )
+ if ( !_cond.timed_wait( lock, deadline, predicate ) )
_runTasks();
}
}
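
The only non-trivial part of these substitutions is the timed wait: the old scoped_lock exposed .boost() so the boost::xtime overload of timed_wait could be called, while the new boost::unique_lock is simply passed straight in. A standalone sketch of that pattern, assuming boost 1.50+ (which spells the clock constant TIME_UTC_; this tree hides the naming difference behind the MONGO_BOOST_TIME_UTC macro seen in queue.h below). The helper name is illustrative:

    #include <boost/thread/condition.hpp>
    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>
    #include <boost/thread/xtime.hpp>

    // Wait until 'done' becomes true or roughly maxSeconds elapse.
    // Returns false on timeout, true otherwise.
    bool waitWithDeadline(boost::mutex& m, boost::condition& cond,
                          const bool& done, int maxSeconds) {
        boost::xtime deadline;
        boost::xtime_get(&deadline, boost::TIME_UTC_);   // absolute deadline, like incxtimemillis() builds
        deadline.sec += maxSeconds;

        boost::unique_lock<boost::mutex> lk(m);
        while (!done) {
            if (!cond.timed_wait(lk, deadline))          // the unique_lock is passed directly
                return false;
        }
        return true;
    }
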
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index 030c0f3120e..1b2f197afcc 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -104,9 +104,7 @@ namespace {
class Job : public BackgroundJob {
public:
- Job()
- : _mutex("BackgroundJobLifeCycle::Go")
- , _hasRun(false) {}
+ Job() : _hasRun(false) {}
virtual std::string name() const {
return "BackgroundLifeCycle::CannotCallGoAgain";
@@ -114,7 +112,7 @@ namespace {
virtual void run() {
{
- mongo::scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
ASSERT_FALSE( _hasRun );
_hasRun = true;
}
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index 67eb565c37d..5e207e7662b 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -75,45 +75,7 @@ namespace mongo {
~StaticObserver() { _destroyingStatics = true; }
};
- /** On pthread systems, it is an error to destroy a mutex while held (boost mutex
- * may use pthread). Static global mutexes may be held upon shutdown in our
- * implementation, and this way we avoid destroying them.
- * NOT recursive.
- */
- class mutex : boost::noncopyable {
- public:
- const char * const _name;
- // NOINLINE so that 'mutex::mutex' is always in the frame, this makes
- // it easier for us to suppress the leaks caused by the static observer.
- NOINLINE_DECL mutex(const char *name) : _name(name)
- {
- _m = new boost::timed_mutex();
- IGNORE_OBJECT( _m ); // Turn-off heap checking on _m
- }
- ~mutex() {
- if( !StaticObserver::_destroyingStatics ) {
- UNIGNORE_OBJECT( _m );
- delete _m;
- }
- }
-
- class scoped_lock : boost::noncopyable {
- public:
- scoped_lock( mongo::mutex &m ) :
- _l( m.boost() ) {
- }
- ~scoped_lock() {
- }
- boost::unique_lock<boost::timed_mutex>& boost() { return _l; }
- private:
- boost::unique_lock<boost::timed_mutex> _l;
- };
- private:
- boost::timed_mutex &boost() { return *_m; }
- boost::timed_mutex *_m;
- };
-
- typedef mongo::mutex::scoped_lock scoped_lock;
+ using mutex = boost::mutex;
/** The concept with SimpleMutex is that it is a basic lock/unlock with no
special functionality (such as try and try timeout). Thus it can be
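
The block deleted above was the whole point of the old wrapper: on pthread platforms it is an error to destroy a mutex that is still held, and some of these global mutexes can still be held when static destructors run, so the wrapper heap-allocated the underlying boost::timed_mutex and deliberately leaked it once StaticObserver had flagged static destruction. A condensed sketch of that now-removed idiom (the NOINLINE and heap-checker annotations are dropped, and a plain bool stands in for StaticObserver::_destroyingStatics, which this patch keeps):

    #include <boost/noncopyable.hpp>
    #include <boost/thread/mutex.hpp>

    bool destroyingStatics = false;   // stand-in for StaticObserver::_destroyingStatics

    class LeakOnShutdownMutex : boost::noncopyable {
    public:
        LeakOnShutdownMutex() : _m(new boost::timed_mutex()) {}
        ~LeakOnShutdownMutex() {
            if (!destroyingStatics)
                delete _m;            // otherwise leak deliberately: never destroy a mutex
        }                             // that might still be held at shutdown
        boost::timed_mutex& boostMutex() { return *_m; }
    private:
        boost::timed_mutex* _m;
    };
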
diff --git a/src/mongo/util/concurrency/synchronization.cpp b/src/mongo/util/concurrency/synchronization.cpp
index 3121989123a..d7cf3575c32 100644
--- a/src/mongo/util/concurrency/synchronization.cpp
+++ b/src/mongo/util/concurrency/synchronization.cpp
@@ -59,20 +59,20 @@ namespace {
}
}
- Notification::Notification() : _mutex ( "Notification" ) {
+ Notification::Notification() {
lookFor = 1;
cur = 0;
}
void Notification::waitToBeNotified() {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
while ( lookFor != cur )
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
lookFor++;
}
void Notification::notifyOne() {
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
verify( cur != lookFor );
cur++;
_condition.notify_one();
@@ -80,36 +80,36 @@ namespace {
/* --- NotifyAll --- */
- NotifyAll::NotifyAll() : _mutex("NotifyAll") {
+ NotifyAll::NotifyAll() {
_lastDone = 0;
_lastReturned = 0;
_nWaiting = 0;
}
NotifyAll::When NotifyAll::now() {
- scoped_lock lock( _mutex );
+ boost::lock_guard<boost::mutex> lock( _mutex );
return ++_lastReturned;
}
void NotifyAll::waitFor(When e) {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
++_nWaiting;
while( _lastDone < e ) {
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
}
}
void NotifyAll::awaitBeyondNow() {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
++_nWaiting;
When e = ++_lastReturned;
while( _lastDone <= e ) {
- _condition.wait( lock.boost() );
+ _condition.wait(lock);
}
}
void NotifyAll::notifyAll(When e) {
- scoped_lock lock( _mutex );
+ boost::unique_lock<boost::mutex> lock( _mutex );
_lastDone = e;
_nWaiting = 0;
_condition.notify_all();
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 2f16d4e5fa4..3056294e163 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -102,7 +102,7 @@ namespace mongo {
};
ThreadPool::ThreadPool(int nThreads, const std::string& threadNamePrefix)
- : _mutex("ThreadPool"), _tasksRemaining(0)
+ : _tasksRemaining(0)
, _nThreads(nThreads)
, _threadNamePrefix(threadNamePrefix) {
startThreads();
@@ -111,13 +111,13 @@ namespace mongo {
ThreadPool::ThreadPool(const DoNotStartThreadsTag&,
int nThreads,
const std::string& threadNamePrefix)
- : _mutex("ThreadPool"), _tasksRemaining(0)
+ : _tasksRemaining(0)
, _nThreads(nThreads)
, _threadNamePrefix(threadNamePrefix) {
}
void ThreadPool::startThreads() {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
for (int i = 0; i < _nThreads; ++i) {
const std::string threadName(_threadNamePrefix.empty() ?
_threadNamePrefix :
@@ -145,14 +145,14 @@ namespace mongo {
}
void ThreadPool::join() {
- scoped_lock lock(_mutex);
+ boost::unique_lock<boost::mutex> lock(_mutex);
while(_tasksRemaining) {
- _condition.wait(lock.boost());
+ _condition.wait(lock);
}
}
void ThreadPool::schedule(Task task) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
_tasksRemaining++;
@@ -167,7 +167,7 @@ namespace mongo {
// should only be called by a worker from the worker thread
void ThreadPool::task_done(Worker* worker) {
- scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
if (!_tasks.empty()) {
worker->set_task(_tasks.front());
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index efec9b50ee2..2bda04c5c10 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -122,38 +122,33 @@ namespace mongo {
#else
- TicketHolder::TicketHolder( int num )
- : _outof(num),
- _num(num),
- _mutex("TicketHolder") {
- }
+ TicketHolder::TicketHolder( int num ) : _outof(num), _num(num) {}
- TicketHolder::~TicketHolder(){
- }
+ TicketHolder::~TicketHolder() = default;
bool TicketHolder::tryAcquire() {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
return _tryAcquire();
}
void TicketHolder::waitForTicket() {
- scoped_lock lk( _mutex );
+ boost::unique_lock<boost::mutex> lk( _mutex );
while( ! _tryAcquire() ) {
- _newTicket.wait( lk.boost() );
+ _newTicket.wait( lk );
}
}
void TicketHolder::release() {
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_num++;
}
_newTicket.notify_one();
}
Status TicketHolder::resize( int newSize ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
int used = _outof.load() - _num;
if ( used > newSize ) {
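
Aside from the lock types, the TicketHolder logic is unchanged: acquisition loops on a condition variable until a ticket is available, and release increments the count under the lock but notifies outside it. A condensed, self-contained sketch of that shape using the same boost primitives (the class name and the omission of the resize/outof bookkeeping are illustrative simplifications):

    #include <boost/thread/condition.hpp>
    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>

    class SimpleTicketPool {
    public:
        explicit SimpleTicketPool(int num) : _num(num) {}

        bool tryAcquire() {
            boost::lock_guard<boost::mutex> lk(_mutex);
            return _tryAcquire();
        }

        void waitForTicket() {
            boost::unique_lock<boost::mutex> lk(_mutex);
            while (!_tryAcquire())
                _newTicket.wait(lk);
        }

        void release() {
            {
                boost::lock_guard<boost::mutex> lk(_mutex);
                _num++;
            }
            _newTicket.notify_one();   // notify outside the lock, as the patched code does
        }

    private:
        bool _tryAcquire() {           // caller must hold _mutex
            if (_num <= 0)
                return false;
            _num--;
            return true;
        }

        boost::mutex _mutex;
        boost::condition _newTicket;
        int _num;
    };
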
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index a1d00b7830f..bbeb9cf00c9 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -80,12 +80,7 @@ namespace {
failPointPrng.getMake()->resetSeed(seed);
}
- FailPoint::FailPoint():
- _fpInfo(0),
- _mode(off),
- _timesOrPeriod(0),
- _modMutex("failPointMutex") {
- }
+ FailPoint::FailPoint() : _fpInfo(0), _mode(off), _timesOrPeriod(0) {}
void FailPoint::shouldFailCloseBlock() {
_fpInfo.subtractAndFetch(1);
@@ -100,7 +95,7 @@ namespace {
* 3. Sets the new mode.
*/
- scoped_lock scoped(_modMutex);
+ boost::lock_guard<boost::mutex> scoped(_modMutex);
// Step 1
disableFailPoint();
@@ -193,7 +188,7 @@ namespace {
BSONObj FailPoint::toBSON() const {
BSONObjBuilder builder;
- scoped_lock scoped(_modMutex);
+ boost::lock_guard<boost::mutex> scoped(_modMutex);
builder.append("mode", _mode);
builder.append("data", _data);
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index e2ff5089fd7..6ca1df82e14 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -28,10 +28,11 @@
#pragma once
+#include <boost/thread/mutex.hpp>
+
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/util/concurrency/mutex.h"
namespace mongo {
/**
@@ -157,7 +158,7 @@ namespace mongo {
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable mutex _modMutex;
+ mutable boost::mutex _modMutex;
/**
* Enables this fail point.
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index cf60e1cdadb..5acf3d68cb2 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -117,9 +117,7 @@ namespace mongo {
return parent;
}
- FileAllocator::FileAllocator()
- : _pendingMutex("FileAllocator"), _failed() {
- }
+ FileAllocator::FileAllocator() : _failed() {}
void FileAllocator::start() {
@@ -127,7 +125,7 @@ namespace mongo {
}
void FileAllocator::requestAllocation( const string &name, long &size ) {
- scoped_lock lk( _pendingMutex );
+ boost::lock_guard<boost::mutex> lk( _pendingMutex );
if ( _failed )
return;
long oldSize = prevSize( name );
@@ -141,7 +139,7 @@ namespace mongo {
}
void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
- scoped_lock lk( _pendingMutex );
+ boost::unique_lock<boost::mutex> lk( _pendingMutex );
// In case the allocator is in failed state, check once before starting so that subsequent
// requests for the same database would fail fast after the first one has failed.
@@ -166,7 +164,7 @@ namespace mongo {
_pendingUpdated.notify_all();
while( inProgress( name ) ) {
checkFailure();
- _pendingUpdated.wait( lk.boost() );
+ _pendingUpdated.wait(lk);
}
}
@@ -174,9 +172,9 @@ namespace mongo {
void FileAllocator::waitUntilFinished() const {
if ( _failed )
return;
- scoped_lock lk( _pendingMutex );
+ boost::unique_lock<boost::mutex> lk( _pendingMutex );
while( _pending.size() != 0 )
- _pendingUpdated.wait( lk.boost() );
+ _pendingUpdated.wait(lk);
}
// TODO: pull this out to per-OS files once they exist
@@ -361,15 +359,15 @@ namespace mongo {
}
while( 1 ) {
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::unique_lock<boost::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
- fa->_pendingUpdated.wait( lk.boost() );
+ fa->_pendingUpdated.wait(lk);
}
while( 1 ) {
string name;
long size = 0;
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
if ( fa->_pending.size() == 0 )
break;
name = fa->_pending.front();
@@ -441,7 +439,7 @@ namespace mongo {
}
{
- scoped_lock lk(fa->_pendingMutex);
+ boost::lock_guard<boost::mutex> lk(fa->_pendingMutex);
fa->_failed = true;
// TODO: Should we remove the file from pending?
@@ -454,7 +452,7 @@ namespace mongo {
}
{
- scoped_lock lk( fa->_pendingMutex );
+ boost::lock_guard<boost::mutex> lk( fa->_pendingMutex );
fa->_pendingSize.erase( name );
fa->_pending.pop_front();
fa->_pendingUpdated.notify_all();
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
index 354ca4df62c..86e6c1e6b6b 100644
--- a/src/mongo/util/mmap_win.cpp
+++ b/src/mongo/util/mmap_win.cpp
@@ -69,7 +69,7 @@ namespace mongo {
// 2. Prevents calls to VirtualProtect while we remapping files.
// Lock Ordering:
// - If taken, must be after previewViews._m to prevent deadlocks
- mutex mapViewMutex("mapView");
+ mutex mapViewMutex;
MAdvise::MAdvise(void *,unsigned, Advice) { }
MAdvise::~MAdvise() { }
@@ -165,7 +165,7 @@ namespace mongo {
boost::lock_guard<boost::mutex> lk(_flushMutex);
{
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
UnmapViewOfFile(*i);
@@ -187,7 +187,7 @@ namespace mongo {
void* MemoryMappedFile::createReadOnlyMap() {
verify( maphandle );
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
void* readOnlyMapAddress = NULL;
int current_retry = 0;
@@ -299,7 +299,7 @@ namespace mongo {
void *view = 0;
{
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
int current_retry = 0;
@@ -364,7 +364,7 @@ namespace mongo {
void* MemoryMappedFile::createPrivateMap() {
verify( maphandle );
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
LPVOID thisAddress = getNextMemoryMappedFileLocation( len );
@@ -412,7 +412,7 @@ namespace mongo {
privateViews.clearWritableBits(oldPrivateAddr, len);
- scoped_lock lk(mapViewMutex);
+ boost::lock_guard<boost::mutex> lk(mapViewMutex);
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD dosError = GetLastError();
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index f260573039e..8841be6a787 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -643,7 +643,7 @@ namespace mongo {
std::set<std::string>* paths;
{
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
sockets = _sockets;
_sockets = new std::set<int>();
paths = _socketPaths;
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
index 6efb3a717b3..481a646239f 100644
--- a/src/mongo/util/net/listen.h
+++ b/src/mongo/util/net/listen.h
@@ -142,26 +142,25 @@ namespace mongo {
class ListeningSockets {
public:
ListeningSockets()
- : _mutex("ListeningSockets")
- , _sockets( new std::set<int>() )
+ : _sockets( new std::set<int>() )
, _socketPaths( new std::set<std::string>() )
{ }
void add( int sock ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_sockets->insert( sock );
}
void addPath( const std::string& path ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_socketPaths->insert( path );
}
void remove( int sock ) {
- scoped_lock lk( _mutex );
+ boost::lock_guard<boost::mutex> lk( _mutex );
_sockets->erase( sock );
}
void closeAll();
static ListeningSockets* get();
private:
- mongo::mutex _mutex;
+ boost::mutex _mutex;
std::set<int>* _sockets;
std::set<std::string>* _socketPaths; // for unix domain sockets
static ListeningSockets* _instance;
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index 1ab8355dc05..3b6e3204417 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -115,9 +115,9 @@ namespace mongo {
std::set<MessagingPort*> ports;
mongo::mutex m;
public:
- Ports() : ports(), m("Ports") {}
+ Ports() : ports() {}
void closeAll(unsigned skip_mask) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
for ( std::set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
if( (*i)->tag & skip_mask )
continue;
@@ -125,11 +125,11 @@ namespace mongo {
}
}
void insert(MessagingPort* p) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
ports.insert(p);
}
void erase(MessagingPort* p) {
- scoped_lock bl(m);
+ boost::lock_guard<boost::mutex> bl(m);
ports.erase(p);
}
};
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 07ba57e87ab..7ae46f97325 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -56,26 +56,23 @@ namespace mongo {
typedef size_t (*getSizeFunc)(const T& t);
public:
BlockingQueue() :
- _lock("BlockingQueue"),
_maxSize(std::numeric_limits<std::size_t>::max()),
_currentSize(0),
_getSize(&_getSizeDefault) {}
BlockingQueue(size_t size) :
- _lock("BlockingQueue(bounded)"),
_maxSize(size),
_currentSize(0),
_getSize(&_getSizeDefault) {}
BlockingQueue(size_t size, getSizeFunc f) :
- _lock("BlockingQueue(custom size)"),
_maxSize(size),
_currentSize(0),
_getSize(f) {}
void push(T const& t) {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
size_t tSize = _getSize(t);
while (_currentSize + tSize > _maxSize) {
- _cvNoLongerFull.wait( l.boost() );
+ _cvNoLongerFull.wait( l );
}
_queue.push( t );
_currentSize += tSize;
@@ -83,7 +80,7 @@ namespace mongo {
}
bool empty() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _queue.empty();
}
@@ -91,7 +88,7 @@ namespace mongo {
* The size as measured by the size function. Default to counting each item
*/
size_t size() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _currentSize;
}
@@ -106,19 +103,19 @@ namespace mongo {
* The number/count of items in the queue ( _queue.size() )
*/
size_t count() const {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
return _queue.size();
}
void clear() {
- scoped_lock l(_lock);
+ boost::lock_guard<boost::mutex> l(_lock);
_queue = std::queue<T>();
_currentSize = 0;
_cvNoLongerFull.notify_one();
}
bool tryPop( T & t ) {
- scoped_lock l( _lock );
+ boost::lock_guard<boost::mutex> l( _lock );
if ( _queue.empty() )
return false;
@@ -132,9 +129,9 @@ namespace mongo {
T blockingPop() {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() )
- _cvNoLongerEmpty.wait( l.boost() );
+ _cvNoLongerEmpty.wait( l );
T t = _queue.front();
_queue.pop();
@@ -158,9 +155,9 @@ namespace mongo {
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() ) {
- if ( ! _cvNoLongerEmpty.timed_wait( l.boost() , xt ) )
+ if ( ! _cvNoLongerEmpty.timed_wait( l , xt ) )
return false;
}
@@ -180,9 +177,9 @@ namespace mongo {
boost::xtime_get(&xt, MONGO_BOOST_TIME_UTC);
xt.sec += maxSecondsToWait;
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
while( _queue.empty() ) {
- if ( ! _cvNoLongerEmpty.timed_wait( l.boost() , xt ) )
+ if ( ! _cvNoLongerEmpty.timed_wait( l , xt ) )
return false;
}
@@ -194,7 +191,7 @@ namespace mongo {
// only one consumer
bool peek(T& t) {
- scoped_lock l( _lock );
+ boost::unique_lock<boost::mutex> l( _lock );
if (_queue.empty()) {
return false;
}
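
Taken together, queue.h now follows the same rule as everything above: const accessors take a boost::lock_guard, and anything that waits on _cvNoLongerFull or _cvNoLongerEmpty takes a boost::unique_lock. A small usage sketch of the queue from another translation unit, assuming it builds as in this tree (this is not a test from the repository):

    #include <boost/thread/thread.hpp>

    #include "mongo/util/queue.h"

    namespace {

        void producer(mongo::BlockingQueue<int>* q) {
            for (int i = 0; i < 10; ++i)
                q->push(i);                      // blocks while the bounded queue is full
        }

        void consumer(mongo::BlockingQueue<int>* q) {
            for (int i = 0; i < 10; ++i) {
                const int v = q->blockingPop();  // waits on _cvNoLongerEmpty under a unique_lock
                (void)v;                         // consume the value
            }
        }

    }  // namespace

    int main() {
        mongo::BlockingQueue<int> q(4);          // at most 4 items with the default size function
        boost::thread c(&consumer, &q);
        boost::thread p(&producer, &q);
        p.join();
        c.join();
        return 0;
    }
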