author     Andy Schwerin <schwerin@mongodb.com>    2015-03-04 10:30:44 -0500
committer  Andy Schwerin <schwerin@mongodb.com>    2015-03-05 17:35:14 -0500
commit     59a9a04651486763b49ddb706a195bebccf76642 (patch)
tree       335fa6d0cbc04f2c3577d2e4594e9a573f2b4594 /src
parent     ac525a4566a46b6b79a8465903b2ab0391eab506 (diff)
download   mongo-59a9a04651486763b49ddb706a195bebccf76642.tar.gz
SERVER-17310 Replace boost::*_mutex::scoped_lock with boost::lock_guard.
Achieved via grep, sed and bash:

    grep -Irl mutex::scoped_lock src/mongo | xargs sed -i.orig -E 's/(boost::(recursive_|timed_)?)mutex::scoped_lock/boost::lock_guard<\1mutex>/'

Then, by converting boost::lock_guard to boost::unique_lock as appropriate.
Finally, by removing unused mongo::mutex::try_lock.
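
In Boost.Thread, the nested mutex::scoped_lock is a typedef for boost::unique_lock, so the mechanical rewrite to the lighter boost::lock_guard is only safe where the lock is held for the whole scope; the follow-up pass restores unique_lock at call sites that wait on a condition variable or construct with boost::defer_lock, since lock_guard supports neither. A minimal sketch of the distinction (the function names and the ready flag are illustrative, not code from this change):

    #include <boost/thread/condition_variable.hpp>
    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>

    boost::mutex m;
    boost::condition_variable cv;
    bool ready = false;

    // lock_guard: acquires in the constructor, releases in the destructor,
    // and exposes nothing else -- right for plain critical sections.
    void produce() {
        boost::lock_guard<boost::mutex> lk(m);
        ready = true;
        cv.notify_one();
    }

    // unique_lock: required when the mutex must be released and reacquired
    // mid-scope, as condition_variable::wait() does while blocked.
    void consume() {
        boost::unique_lock<boost::mutex> lk(m);
        while (!ready)
            cv.wait(lk);  // unlocks lk while waiting; lock_guard cannot
    }
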
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/replica_set_monitor.cpp | 22
-rw-r--r--  src/mongo/db/auth/internal_user_auth.cpp | 4
-rw-r--r--  src/mongo/db/background.cpp | 10
-rw-r--r--  src/mongo/db/client.cpp | 6
-rw-r--r--  src/mongo/db/clientcursor.h | 2
-rw-r--r--  src/mongo/db/clientlistplugin.cpp | 4
-rw-r--r--  src/mongo/db/curop.cpp | 2
-rw-r--r--  src/mongo/db/currentop_command.cpp | 2
-rw-r--r--  src/mongo/db/global_environment_d.cpp | 8
-rw-r--r--  src/mongo/db/index_builder.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 2
-rw-r--r--  src/mongo/db/stats/lock_server_status_section.cpp | 2
-rw-r--r--  src/mongo/db/storage/in_memory/in_memory_engine.cpp | 8
-rw-r--r--  src/mongo/db/storage/kv/kv_catalog.cpp | 18
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp | 10
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 2
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp | 6
-rw-r--r--  src/mongo/db/storage/rocks/rocks_engine.cpp | 10
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store.cpp | 16
-rw-r--r--  src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp | 4
-rw-r--r--  src/mongo/db/storage/rocks/rocks_transaction.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 8
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 18
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp | 4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp | 4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp | 20
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 4
-rw-r--r--  src/mongo/scripting/v8-3.25_utils.cpp | 4
-rw-r--r--  src/mongo/scripting/v8_utils.cpp | 4
-rw-r--r--  src/mongo/shell/bench.cpp | 14
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 20
-rw-r--r--  src/mongo/util/concurrency/mutex.h | 16
-rw-r--r--  src/mongo/util/concurrency/qlock.h | 28
-rw-r--r--  src/mongo/util/concurrency/ticketholder.cpp | 2
-rw-r--r--  src/mongo/util/signal_handlers_synchronous.cpp | 10
-rw-r--r--  src/mongo/util/tcmalloc_server_status_section.cpp | 2
36 files changed, 147 insertions, 159 deletions
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 66af1596d32..57de0e616d8 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -284,7 +284,7 @@ namespace {
HostAndPort ReplicaSetMonitor::getHostOrRefresh(const ReadPreferenceSetting& criteria) {
{
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
HostAndPort out = _state->getMatchingHost(criteria);
if (!out.empty())
return out;
@@ -313,7 +313,7 @@ namespace {
}
Refresher ReplicaSetMonitor::startOrContinueRefresh() {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
Refresher out(_state);
DEV _state->checkInvariants();
@@ -321,7 +321,7 @@ namespace {
}
void ReplicaSetMonitor::failedHost(const HostAndPort& host) {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
if (node)
node->markFailed();
@@ -329,19 +329,19 @@ namespace {
}
bool ReplicaSetMonitor::isPrimary(const HostAndPort& host) const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isMaster : false;
}
bool ReplicaSetMonitor::isHostUp(const HostAndPort& host) const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isUp : false;
}
int ReplicaSetMonitor::getConsecutiveFailedScans() const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
return _state->consecutiveFailedScans;
}
@@ -351,12 +351,12 @@ namespace {
}
std::string ReplicaSetMonitor::getServerAddress() const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
return _state->getServerAddress();
}
bool ReplicaSetMonitor::contains(const HostAndPort& host) const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
return _state->seedNodes.count(host);
}
@@ -413,7 +413,7 @@ namespace {
if (!clearSeedCache) {
// Save list of current set members so that the monitor can be rebuilt if needed.
const ReplicaSetMonitorPtr& rsm = setIt->second;
- boost::mutex::scoped_lock lk(rsm->_state->mutex);
+ boost::lock_guard<boost::mutex> lk(rsm->_state->mutex);
seedServers[name] = rsm->_state->seedNodes;
}
sets.erase(setIt);
@@ -436,7 +436,7 @@ namespace {
// TODO move to correct order with non-statics before pushing
void ReplicaSetMonitor::appendInfo(BSONObjBuilder& bsonObjBuilder) const {
- boost::mutex::scoped_lock lk(_state->mutex);
+ boost::lock_guard<boost::mutex> lk(_state->mutex);
// NOTE: the format here must be consistent for backwards compatibility
BSONArrayBuilder hosts(bsonObjBuilder.subarrayStart("hosts"));
@@ -746,7 +746,7 @@ namespace {
}
HostAndPort Refresher::_refreshUntilMatches(const ReadPreferenceSetting* criteria) {
- boost::mutex::scoped_lock lk(_set->mutex);
+ boost::unique_lock<boost::mutex> lk(_set->mutex);
while (true) {
if (criteria) {
HostAndPort out = _set->getMatchingHost(*criteria);
diff --git a/src/mongo/db/auth/internal_user_auth.cpp b/src/mongo/db/auth/internal_user_auth.cpp
index 36e5617a181..f5a0c4029a1 100644
--- a/src/mongo/db/auth/internal_user_auth.cpp
+++ b/src/mongo/db/auth/internal_user_auth.cpp
@@ -59,7 +59,7 @@ namespace mongo {
if (!isInternalAuthSet()) {
authParamsSet = true;
}
- boost::mutex::scoped_lock lk(authParamMutex);
+ boost::lock_guard<boost::mutex> lk(authParamMutex);
if (authParamsIn["mechanism"].String() != "SCRAM-SHA-1") {
authParams = authParamsIn.copy();
@@ -83,7 +83,7 @@ namespace mongo {
return BSONObj();
}
- boost::mutex::scoped_lock lk(authParamMutex);
+ boost::lock_guard<boost::mutex> lk(authParamMutex);
return authParams.copy();
}
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index 56336acd4ae..6c859d5d700 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -119,12 +119,12 @@ namespace {
} // namespace
bool BackgroundOperation::inProgForDb(StringData db) {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
return dbsInProg.find(db) != dbsInProg.end();
}
bool BackgroundOperation::inProgForNs(StringData ns) {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
return nsInProg.find(ns) != nsInProg.end();
}
@@ -153,19 +153,19 @@ namespace {
}
BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
recordBeginAndInsert(&dbsInProg, _ns.db());
recordBeginAndInsert(&nsInProg, _ns.ns());
}
BackgroundOperation::~BackgroundOperation() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
recordEndAndRemove(&dbsInProg, _ns.db());
recordEndAndRemove(&nsInProg, _ns.ns());
}
void BackgroundOperation::dump(std::ostream& ss) {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
if( nsInProg.size() ) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for( BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i )
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 79ad6edc9e7..9b6d10e8067 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -120,7 +120,7 @@ namespace {
currentClient.reset(client);
// This makes the client visible to maintenance threads
- boost::mutex::scoped_lock clientLock(clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(clientsMutex);
clients.insert(client);
}
@@ -143,7 +143,7 @@ namespace {
if ( ! inShutdown() ) {
// we can't clean up safely once we're in shutdown
{
- boost::mutex::scoped_lock clientLock(clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(clientsMutex);
if ( ! _shutdown )
clients.erase(this);
}
@@ -162,7 +162,7 @@ namespace {
if ( inShutdown() )
return false;
{
- boost::mutex::scoped_lock clientLock(clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(clientsMutex);
clients.erase(this);
}
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index f89eaecd681..7f2736306c9 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -42,7 +42,7 @@
namespace mongo {
- typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
+ typedef boost::lock_guard<boost::recursive_mutex> recursive_scoped_lock;
class ClientCursor;
class Collection;
class CurOp;
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index cf7e359d309..a8200a5c997 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -86,7 +86,7 @@ namespace {
static void _processAllClients(std::stringstream& ss) {
using namespace html;
- boost::mutex::scoped_lock scopedLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex);
ClientSet::const_iterator it = Client::clients.begin();
for (; it != Client::clients.end(); it++) {
@@ -198,7 +198,7 @@ namespace {
static BSONArray _processAllClients(MatchExpression* matcher) {
BSONArrayBuilder array;
- boost::mutex::scoped_lock scopedLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex);
ClientSet::const_iterator it = Client::clients.begin();
for (; it != Client::clients.end(); it++) {
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index ae26aa84dd2..7379aa6bd7a 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -131,7 +131,7 @@ namespace mongo {
CurOp::~CurOp() {
if ( _wrapped ) {
- boost::mutex::scoped_lock clientLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex);
_client->_curOp = _wrapped;
}
_client = 0;
diff --git a/src/mongo/db/currentop_command.cpp b/src/mongo/db/currentop_command.cpp
index 1dd1b784201..4b372ecb35b 100644
--- a/src/mongo/db/currentop_command.cpp
+++ b/src/mongo/db/currentop_command.cpp
@@ -92,7 +92,7 @@ namespace mongo {
BSONArrayBuilder inprogBuilder(retVal.subarrayStart("inprog"));
- boost::mutex::scoped_lock scopedLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex);
ClientSet::const_iterator it = Client::clients.begin();
for ( ; it != Client::clients.end(); it++) {
diff --git a/src/mongo/db/global_environment_d.cpp b/src/mongo/db/global_environment_d.cpp
index 67d5b1b56c5..e4fa3c30069 100644
--- a/src/mongo/db/global_environment_d.cpp
+++ b/src/mongo/db/global_environment_d.cpp
@@ -166,7 +166,7 @@ namespace mongo {
}
void GlobalEnvironmentMongoD::setKillAllOperations() {
- boost::mutex::scoped_lock clientLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex);
_globalKill = true;
for (size_t i = 0; i < _killOpListeners.size(); i++) {
try {
@@ -207,7 +207,7 @@ namespace mongo {
}
bool GlobalEnvironmentMongoD::killOperation(unsigned int opId) {
- boost::mutex::scoped_lock clientLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex);
for(ClientSet::const_iterator j = Client::clients.begin();
j != Client::clients.end(); ++j) {
@@ -224,7 +224,7 @@ namespace mongo {
}
void GlobalEnvironmentMongoD::killAllUserOperations(const OperationContext* txn) {
- boost::mutex::scoped_lock scopedLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex);
for (ClientSet::const_iterator i = Client::clients.begin();
i != Client::clients.end(); i++) {
@@ -250,7 +250,7 @@ namespace mongo {
}
void GlobalEnvironmentMongoD::registerKillOpListener(KillOpListenerInterface* listener) {
- boost::mutex::scoped_lock clientLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> clientLock(Client::clientsMutex);
_killOpListeners.push_back(listener);
}
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index dca08c69de0..b50bdf552a2 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -59,7 +59,7 @@ namespace {
boost::condition_variable _bgIndexStartingCondVar;
void _setBgIndexStarting() {
- boost::mutex::scoped_lock lk(_bgIndexStartingMutex);
+ boost::lock_guard<boost::mutex> lk(_bgIndexStartingMutex);
invariant(_bgIndexStarting == false);
_bgIndexStarting = true;
_bgIndexStartingCondVar.notify_one();
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 777e7b4ad05..3740fec5aa2 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2203,7 +2203,7 @@ namespace {
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- boost::mutex::scoped_lock lock(_mutex);
+ boost::lock_guard<boost::mutex> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp
index 84d1ce53df5..1e81904b158 100644
--- a/src/mongo/db/stats/lock_server_status_section.cpp
+++ b/src/mongo/db/stats/lock_server_status_section.cpp
@@ -56,7 +56,7 @@ namespace {
// This returns the blocked lock states
{
- boost::mutex::scoped_lock scopedLock(Client::clientsMutex);
+ boost::lock_guard<boost::mutex> scopedLock(Client::clientsMutex);
// Count all clients
numTotal = Client::clients.size();
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine.cpp b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
index 01bb87b62a8..6c57429215d 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
@@ -53,7 +53,7 @@ namespace mongo {
StringData ns,
StringData ident,
const CollectionOptions& options) {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
if (options.capped) {
return new InMemoryRecordStore(ns,
&_dataMap[ident],
@@ -77,13 +77,13 @@ namespace mongo {
SortedDataInterface* InMemoryEngine::getSortedDataInterface(OperationContext* opCtx,
StringData ident,
const IndexDescriptor* desc) {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
return getInMemoryBtreeImpl(Ordering::make(desc->keyPattern()), &_dataMap[ident]);
}
Status InMemoryEngine::dropIdent(OperationContext* opCtx,
StringData ident) {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
_dataMap.erase(ident);
return Status::OK();
}
@@ -96,7 +96,7 @@ namespace mongo {
std::vector<std::string> InMemoryEngine::getAllIdents( OperationContext* opCtx ) const {
std::vector<std::string> all;
{
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
for ( DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it ) {
all.push_back( it->first );
}
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 20f0c28b830..764e4b4c93e 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -64,7 +64,7 @@ namespace {
virtual void commit() {}
virtual void rollback() {
- boost::mutex::scoped_lock lk(_catalog->_identsLock);
+ boost::lock_guard<boost::mutex> lk(_catalog->_identsLock);
_catalog->_idents.erase(_ident);
}
@@ -80,7 +80,7 @@ namespace {
virtual void commit() {}
virtual void rollback() {
- boost::mutex::scoped_lock lk(_catalog->_identsLock);
+ boost::lock_guard<boost::mutex> lk(_catalog->_identsLock);
_catalog->_idents[_ident] = _entry;
}
@@ -152,7 +152,7 @@ namespace {
}
void KVCatalog::getAllCollections( std::vector<std::string>* out ) const {
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
out->push_back( it->first );
}
@@ -173,7 +173,7 @@ namespace {
const string ident = _newUniqueIdent(ns, "collection");
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
Entry& old = _idents[ns.toString()];
if ( !old.ident.empty() ) {
return Status( ErrorCodes::NamespaceExists, "collection already exists" );
@@ -203,7 +203,7 @@ namespace {
}
std::string KVCatalog::getCollectionIdent( StringData ns ) const {
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
invariant( it != _idents.end() );
return it->second.ident;
@@ -230,7 +230,7 @@ namespace {
RecordId dl;
{
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
invariant( it != _idents.end() );
dl = it->second.storedLoc;
@@ -356,7 +356,7 @@ namespace {
invariant( status.getValue() == loc );
}
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
invariant(fromIt != _idents.end());
@@ -380,7 +380,7 @@ namespace {
MODE_X));
}
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
const NSToIdentMap::iterator it = _idents.find(ns.toString());
if (it == _idents.end()) {
return Status( ErrorCodes::NamespaceNotFound, "collection not found" );
@@ -399,7 +399,7 @@ namespace {
std::vector<std::string> v;
{
- boost::mutex::scoped_lock lk( _identsLock );
+ boost::lock_guard<boost::mutex> lk( _identsLock );
for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
NamespaceString ns( it->first );
if ( ns.db() != db )
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 89ecf91f958..7ff8221d999 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -61,7 +61,7 @@ namespace mongo {
}
virtual void rollback() {
- boost::mutex::scoped_lock lk(_engine->_dbsLock);
+ boost::lock_guard<boost::mutex> lk(_engine->_dbsLock);
_engine->_dbs[_db] = _entry;
}
@@ -196,7 +196,7 @@ namespace mongo {
}
void KVStorageEngine::listDatabases( std::vector<std::string>* out ) const {
- boost::mutex::scoped_lock lk( _dbsLock );
+ boost::lock_guard<boost::mutex> lk( _dbsLock );
for ( DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it ) {
if ( it->second->isEmpty() )
continue;
@@ -206,7 +206,7 @@ namespace mongo {
DatabaseCatalogEntry* KVStorageEngine::getDatabaseCatalogEntry( OperationContext* opCtx,
StringData dbName ) {
- boost::mutex::scoped_lock lk( _dbsLock );
+ boost::lock_guard<boost::mutex> lk( _dbsLock );
KVDatabaseCatalogEntry*& db = _dbs[dbName.toString()];
if ( !db ) {
// Not registering change since db creation is implicit and never rolled back.
@@ -224,7 +224,7 @@ namespace mongo {
KVDatabaseCatalogEntry* entry;
{
- boost::mutex::scoped_lock lk( _dbsLock );
+ boost::lock_guard<boost::mutex> lk( _dbsLock );
DBMap::const_iterator it = _dbs.find( db.toString() );
if ( it == _dbs.end() )
return Status( ErrorCodes::NamespaceNotFound, "db not found to drop" );
@@ -250,7 +250,7 @@ namespace mongo {
invariant( toDrop.empty() );
{
- boost::mutex::scoped_lock lk( _dbsLock );
+ boost::lock_guard<boost::mutex> lk( _dbsLock );
txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
_dbs.erase( db.toString() );
}
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index b22aae79d9c..09a3c995acf 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -709,7 +709,7 @@ namespace {
}
try {
- boost::mutex::scoped_lock lock(flushMutex);
+ boost::unique_lock<boost::mutex> lock(flushMutex);
for (unsigned i = 0; i <= 2; i++) {
if (flushRequested.timed_wait(lock, Milliseconds(oneThird))) {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index c60b687544d..29119c7116d 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -253,7 +253,7 @@ namespace {
DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry( OperationContext* opCtx,
StringData db ) {
{
- boost::mutex::scoped_lock lk(_entryMapMutex);
+ boost::lock_guard<boost::mutex> lk(_entryMapMutex);
EntryMap::const_iterator iter = _entryMap.find(db.toString());
if (iter != _entryMap.end()) {
return iter->second;
@@ -271,7 +271,7 @@ namespace {
storageGlobalParams.directoryperdb,
false);
- boost::mutex::scoped_lock lk(_entryMapMutex);
+ boost::lock_guard<boost::mutex> lk(_entryMapMutex);
// Sanity check that we are not overwriting something
invariant(_entryMap.insert(EntryMap::value_type(db.toString(), entry)).second);
@@ -285,7 +285,7 @@ namespace {
// global journal entries occur, which happen to have write intents for the removed files.
getDur().syncDataAndTruncateJournal(txn);
- boost::mutex::scoped_lock lk( _entryMapMutex );
+ boost::lock_guard<boost::mutex> lk( _entryMapMutex );
MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
delete entry;
_entryMap.erase( db.toString() );
diff --git a/src/mongo/db/storage/rocks/rocks_engine.cpp b/src/mongo/db/storage/rocks/rocks_engine.cpp
index fa8b71b5f22..e0955aeb848 100644
--- a/src/mongo/db/storage/rocks/rocks_engine.cpp
+++ b/src/mongo/db/storage/rocks/rocks_engine.cpp
@@ -124,7 +124,7 @@ namespace mongo {
// load ident to prefix map
{
- boost::mutex::scoped_lock lk(_identPrefixMapMutex);
+ boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
for (_iter->Seek(kMetadataPrefix);
_iter->Valid() && _iter->key().starts_with(kMetadataPrefix); _iter->Next()) {
rocksdb::Slice ident(_iter->key());
@@ -202,7 +202,7 @@ namespace mongo {
}
{
- boost::mutex::scoped_lock lk(_identPrefixMapMutex);
+ boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
_identPrefixMap.erase(ident);
}
@@ -210,7 +210,7 @@ namespace mongo {
}
bool RocksEngine::hasIdent(OperationContext* opCtx, StringData ident) const {
- boost::mutex::scoped_lock lk(_identPrefixMapMutex);
+ boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
return _identPrefixMap.find(ident) != _identPrefixMap.end();
}
@@ -226,7 +226,7 @@ namespace mongo {
Status RocksEngine::_createIdentPrefix(StringData ident) {
uint32_t prefix = 0;
{
- boost::mutex::scoped_lock lk(_identPrefixMapMutex);
+ boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
if (_identPrefixMap.find(ident) != _identPrefixMap.end()) {
// already exists
return Status::OK();
@@ -247,7 +247,7 @@ namespace mongo {
}
std::string RocksEngine::_getIdentPrefix(StringData ident) {
- boost::mutex::scoped_lock lk(_identPrefixMapMutex);
+ boost::lock_guard<boost::mutex> lk(_identPrefixMapMutex);
auto prefixIter = _identPrefixMap.find(ident);
invariant(prefixIter != _identPrefixMap.end());
return encodePrefix(prefixIter->second);
diff --git a/src/mongo/db/storage/rocks/rocks_record_store.cpp b/src/mongo/db/storage/rocks/rocks_record_store.cpp
index 7466b39c5e6..fa4d53306be 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store.cpp
@@ -83,7 +83,7 @@ namespace mongo {
void CappedVisibilityManager::addUncommittedRecord(OperationContext* txn,
const RecordId& record) {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
_addUncommittedRecord_inlock(txn, record);
}
@@ -98,14 +98,14 @@ namespace mongo {
RecordId CappedVisibilityManager::getNextAndAddUncommittedRecord(
OperationContext* txn, std::function<RecordId()> nextId) {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
RecordId record = nextId();
_addUncommittedRecord_inlock(txn, record);
return record;
}
void CappedVisibilityManager::dealtWithCappedRecord(const RecordId& record) {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
std::vector<RecordId>::iterator it =
std::find(_uncommittedRecords.begin(), _uncommittedRecords.end(), record);
invariant(it != _uncommittedRecords.end());
@@ -113,7 +113,7 @@ namespace mongo {
}
bool CappedVisibilityManager::isCappedHidden(const RecordId& record) const {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
if (_uncommittedRecords.empty()) {
return false;
}
@@ -122,7 +122,7 @@ namespace mongo {
void CappedVisibilityManager::updateHighestSeen(const RecordId& record) {
if (record > _oplog_highestSeen) {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
if (record > _oplog_highestSeen) {
_oplog_highestSeen = record;
}
@@ -130,7 +130,7 @@ namespace mongo {
}
RecordId CappedVisibilityManager::oplogStartHack() const {
- boost::mutex::scoped_lock lk(_lock);
+ boost::lock_guard<boost::mutex> lk(_lock);
if (_uncommittedRecords.empty()) {
return _oplog_highestSeen;
} else {
@@ -197,7 +197,7 @@ namespace mongo {
RocksRecordStore::~RocksRecordStore() {
{
- boost::timed_mutex::scoped_lock lk(_cappedDeleterMutex);
+ boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
_shuttingDown = true;
}
}
@@ -276,7 +276,7 @@ namespace mongo {
}
// ensure only one thread at a time can do deletes, otherwise they'll conflict.
- boost::timed_mutex::scoped_lock lock(_cappedDeleterMutex, boost::defer_lock);
+ boost::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, boost::defer_lock);
if (_cappedMaxDocs != -1) {
lock.lock(); // Max docs has to be exact, so have to check every time.
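
This capped-delete hunk (and its WiredTiger counterpart further down) is one of the "as appropriate" conversions from the commit message: boost::defer_lock constructs the lock object without acquiring the mutex, which only unique_lock supports, along with the explicit lock()/try_lock() calls that follow. A minimal sketch of the pattern, with hypothetical names standing in for the record store's members:

    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>

    boost::timed_mutex deleterMutex;
    bool mustBeExact = true;  // stands in for the _cappedMaxDocs != -1 check

    void cappedDeleteSketch() {
        // Construct unlocked; lock_guard has no equivalent constructor and
        // no lock()/try_lock() members, so it cannot express this pattern.
        boost::unique_lock<boost::timed_mutex> lock(deleterMutex, boost::defer_lock);
        if (mustBeExact) {
            lock.lock();               // max docs must be exact: always take the lock
        } else if (!lock.try_lock()) {
            return;                    // another thread is already deleting; skip
        }
        // ... delete records under the lock ...
    }
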
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp b/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp
index 91d86698705..0fc8e64e037 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store_mongod.cpp
@@ -99,7 +99,7 @@ namespace mongo {
RocksRecordStore* rs =
checked_cast<RocksRecordStore*>(collection->getRecordStore());
WriteUnitOfWork wuow(&txn);
- boost::timed_mutex::scoped_lock lock(rs->cappedDeleterMutex());
+ boost::lock_guard<boost::timed_mutex> lock(rs->cappedDeleterMutex());
int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
wuow.commit();
return removed;
@@ -155,7 +155,7 @@ namespace mongo {
return false;
}
- boost::mutex::scoped_lock lock(_backgroundThreadMutex);
+ boost::lock_guard<boost::mutex> lock(_backgroundThreadMutex);
NamespaceString nss(ns);
if (_backgroundThreadNamespaces.count(nss)) {
log() << "RocksRecordStoreThread " << ns << " already started";
diff --git a/src/mongo/db/storage/rocks/rocks_transaction.cpp b/src/mongo/db/storage/rocks/rocks_transaction.cpp
index 58e0bf3c6af..fd495a37b70 100644
--- a/src/mongo/db/storage/rocks/rocks_transaction.cpp
+++ b/src/mongo/db/storage/rocks/rocks_transaction.cpp
@@ -89,7 +89,7 @@ namespace mongo {
}
uint64_t newSnapshotId = 0;
{
- boost::mutex::scoped_lock lk(_transactionEngine->_lock);
+ boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
for (const auto& key : _writtenKeys) {
invariant(
!_transactionEngine->_isKeyCommittedAfterSnapshot_inlock(key, _snapshotId));
@@ -108,7 +108,7 @@ namespace mongo {
}
bool RocksTransaction::registerWrite(const std::string& key) {
- boost::mutex::scoped_lock lk(_transactionEngine->_lock);
+ boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
if (_transactionEngine->_isKeyCommittedAfterSnapshot_inlock(key, _snapshotId)) {
// write-committed write conflict
return false;
@@ -129,7 +129,7 @@ namespace mongo {
return;
}
{
- boost::mutex::scoped_lock lk(_transactionEngine->_lock);
+ boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
for (const auto& key : _writtenKeys) {
_transactionEngine->_uncommittedTransactionId.erase(key);
}
@@ -140,7 +140,7 @@ namespace mongo {
void RocksTransaction::recordSnapshotId() {
{
- boost::mutex::scoped_lock lk(_transactionEngine->_lock);
+ boost::lock_guard<boost::mutex> lk(_transactionEngine->_lock);
_cleanup_inlock();
_activeSnapshotsIter = _transactionEngine->_getLatestSnapshotId_inlock();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 60350e0c279..4cbf3ae0318 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -382,7 +382,7 @@ namespace mongo {
if ( ret == EBUSY ) {
// this is expected, queue it up
{
- boost::mutex::scoped_lock lk( _identToDropMutex );
+ boost::lock_guard<boost::mutex> lk( _identToDropMutex );
_identToDrop.insert( uri );
}
_sessionCache->closeAll();
@@ -398,14 +398,14 @@ namespace mongo {
_sizeStorerSyncTracker.resetLastTime();
syncSizeInfo(false);
}
- boost::mutex::scoped_lock lk( _identToDropMutex );
+ boost::lock_guard<boost::mutex> lk( _identToDropMutex );
return !_identToDrop.empty();
}
void WiredTigerKVEngine::dropAllQueued() {
set<string> mine;
{
- boost::mutex::scoped_lock lk( _identToDropMutex );
+ boost::lock_guard<boost::mutex> lk( _identToDropMutex );
mine = _identToDrop;
}
@@ -433,7 +433,7 @@ namespace mongo {
}
{
- boost::mutex::scoped_lock lk( _identToDropMutex );
+ boost::lock_guard<boost::mutex> lk( _identToDropMutex );
for ( set<string>::const_iterator it = deleted.begin(); it != deleted.end(); ++it ) {
_identToDrop.erase( *it );
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 7860f591e06..1a41d145586 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -250,7 +250,7 @@ namespace {
WiredTigerRecordStore::~WiredTigerRecordStore() {
{
- boost::timed_mutex::scoped_lock lk(_cappedDeleterMutex);
+ boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
_shuttingDown = true;
}
@@ -265,7 +265,7 @@ namespace {
}
bool WiredTigerRecordStore::inShutdown() const {
- boost::timed_mutex::scoped_lock lk(_cappedDeleterMutex);
+ boost::lock_guard<boost::timed_mutex> lk(_cappedDeleterMutex);
return _shuttingDown;
}
@@ -392,7 +392,7 @@ namespace {
return 0;
// ensure only one thread at a time can do deletes, otherwise they'll conflict.
- boost::timed_mutex::scoped_lock lock(_cappedDeleterMutex, boost::defer_lock);
+ boost::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, boost::defer_lock);
if (_cappedMaxDocs != -1) {
lock.lock(); // Max docs has to be exact, so have to check every time.
@@ -560,14 +560,14 @@ namespace {
return status;
loc = status.getValue();
if ( loc > _oplog_highestSeen ) {
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
if ( loc > _oplog_highestSeen ) {
_oplog_highestSeen = loc;
}
}
}
else if ( _isCapped ) {
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
loc = _nextId();
_addUncommitedDiskLoc_inlock( txn, loc );
}
@@ -597,7 +597,7 @@ namespace {
}
void WiredTigerRecordStore::dealtWithCappedLoc( const RecordId& loc ) {
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
SortedDiskLocs::iterator it = std::find(_uncommittedDiskLocs.begin(),
_uncommittedDiskLocs.end(),
loc);
@@ -606,7 +606,7 @@ namespace {
}
bool WiredTigerRecordStore::isCappedHidden( const RecordId& loc ) const {
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
if (_uncommittedDiskLocs.empty()) {
return false;
}
@@ -670,7 +670,7 @@ namespace {
}
void WiredTigerRecordStore::_oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const {
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
if ( _uncommittedDiskLocs.empty() ) {
wru->setOplogReadTill( _oplog_highestSeen );
}
@@ -853,7 +853,7 @@ namespace {
if ( !loc.isOK() )
return loc.getStatus();
- boost::mutex::scoped_lock lk( _uncommittedDiskLocsMutex );
+ boost::lock_guard<boost::mutex> lk( _uncommittedDiskLocsMutex );
_addUncommitedDiskLoc_inlock( txn, loc.getValue() );
return Status::OK();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index 312c6646825..88349b3dd2d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -100,7 +100,7 @@ namespace mongo {
WiredTigerRecordStore* rs =
checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
WriteUnitOfWork wuow(&txn);
- boost::timed_mutex::scoped_lock lock(rs->cappedDeleterMutex());
+ boost::lock_guard<boost::timed_mutex> lock(rs->cappedDeleterMutex());
int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
wuow.commit();
return removed;
@@ -156,7 +156,7 @@ namespace mongo {
return false;
}
- boost::mutex::scoped_lock lock(_backgroundThreadMutex);
+ boost::lock_guard<boost::mutex> lock(_backgroundThreadMutex);
NamespaceString nss(ns);
if (_backgroundThreadNamespaces.count(nss)) {
log() << "WiredTigerRecordStoreThread " << ns << " already started";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index bdd01a5194b..be7d6429b56 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -56,14 +56,14 @@ namespace mongo {
}
void syncHappend() {
- boost::mutex::scoped_lock lk( mutex );
+ boost::lock_guard<boost::mutex> lk( mutex );
lastSyncTime++;
condvar.notify_all();
}
// return true if happened
bool awaitCommit() {
- boost::mutex::scoped_lock lk( mutex );
+ boost::unique_lock<boost::mutex> lk( mutex );
long long start = lastSyncTime;
numWaitingForSync.fetchAndAdd(1);
condvar.timed_wait(lk,boost::posix_time::milliseconds(50));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 20f177c05fd..8d61fb17712 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -70,7 +70,7 @@ namespace mongo {
WiredTigerSizeStorer::~WiredTigerSizeStorer() {
// This shouldn't be necessary, but protects us if we screw up.
- boost::mutex::scoped_lock cursorLock( _cursorMutex );
+ boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
_magic = 11111;
_cursor->close(_cursor);
@@ -86,7 +86,7 @@ namespace mongo {
void WiredTigerSizeStorer::onCreate( WiredTigerRecordStore* rs,
long long numRecords, long long dataSize ) {
_checkMagic();
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
Entry& entry = _entries[rs->getURI()];
entry.rs = rs;
entry.numRecords = numRecords;
@@ -96,7 +96,7 @@ namespace mongo {
void WiredTigerSizeStorer::onDestroy( WiredTigerRecordStore* rs ) {
_checkMagic();
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
Entry& entry = _entries[rs->getURI()];
entry.numRecords = rs->numRecords( NULL );
entry.dataSize = rs->dataSize( NULL );
@@ -108,7 +108,7 @@ namespace mongo {
void WiredTigerSizeStorer::storeToCache( StringData uri,
long long numRecords, long long dataSize ) {
_checkMagic();
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
Entry& entry = _entries[uri.toString()];
entry.numRecords = numRecords;
entry.dataSize = dataSize;
@@ -118,7 +118,7 @@ namespace mongo {
void WiredTigerSizeStorer::loadFromCache( StringData uri,
long long* numRecords, long long* dataSize ) const {
_checkMagic();
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
Map::const_iterator it = _entries.find( uri.toString() );
if ( it == _entries.end() ) {
*numRecords = 0;
@@ -130,7 +130,7 @@ namespace mongo {
}
void WiredTigerSizeStorer::fillCache() {
- boost::mutex::scoped_lock cursorLock( _cursorMutex );
+ boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
_checkMagic();
Map m;
@@ -162,17 +162,17 @@ namespace mongo {
}
}
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
_entries.swap(m);
}
void WiredTigerSizeStorer::syncCache(bool syncToDisk) {
- boost::mutex::scoped_lock cursorLock( _cursorMutex );
+ boost::lock_guard<boost::mutex> cursorLock( _cursorMutex );
_checkMagic();
Map myMap;
{
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
for ( Map::iterator it = _entries.begin(); it != _entries.end(); ++it ) {
std::string uriKey = it->first;
Entry& entry = it->second;
@@ -227,7 +227,7 @@ namespace mongo {
invariantWTOK(session->commit_transaction(session, NULL));
{
- boost::mutex::scoped_lock lk( _entriesMutex );
+ boost::lock_guard<boost::mutex> lk( _entriesMutex );
for (Map::iterator it = _entries.begin(); it != _entries.end(); ++it) {
it->second.dirty = false;
}
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 0b71f1d9cf5..76ae98c56d3 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -569,7 +569,7 @@ namespace PerfTests {
virtual int howLongMillis() { return 500; }
virtual bool showDurStats() { return false; }
void timed() {
- boost::mutex::scoped_lock lk(mboost);
+ boost::lock_guard<boost::mutex> lk(mboost);
}
};
class boosttimed_mutexspeed : public B {
@@ -578,7 +578,7 @@ namespace PerfTests {
virtual int howLongMillis() { return 500; }
virtual bool showDurStats() { return false; }
void timed() {
- boost::timed_mutex::scoped_lock lk(mboost_timed);
+ boost::lock_guard<boost::timed_mutex> lk(mboost_timed);
}
};
class simplemutexspeed : public B {
diff --git a/src/mongo/scripting/v8-3.25_utils.cpp b/src/mongo/scripting/v8-3.25_utils.cpp
index 566f1b3b879..8daa4f4dacd 100644
--- a/src/mongo/scripting/v8-3.25_utils.cpp
+++ b/src/mongo/scripting/v8-3.25_utils.cpp
@@ -162,11 +162,11 @@ namespace mongo {
BSONObj _args;
BSONObj _returnData;
void setErrored(bool value) {
- boost::mutex::scoped_lock lck(_erroredMutex);
+ boost::lock_guard<boost::mutex> lck(_erroredMutex);
_errored = value;
}
bool getErrored() {
- boost::mutex::scoped_lock lck(_erroredMutex);
+ boost::lock_guard<boost::mutex> lck(_erroredMutex);
return _errored;
}
private:
diff --git a/src/mongo/scripting/v8_utils.cpp b/src/mongo/scripting/v8_utils.cpp
index 098d04e8123..a540666c61e 100644
--- a/src/mongo/scripting/v8_utils.cpp
+++ b/src/mongo/scripting/v8_utils.cpp
@@ -161,11 +161,11 @@ namespace mongo {
BSONObj _args;
BSONObj _returnData;
void setErrored(bool value) {
- boost::mutex::scoped_lock lck(_erroredMutex);
+ boost::lock_guard<boost::mutex> lck(_erroredMutex);
_errored = value;
}
bool getErrored() {
- boost::mutex::scoped_lock lck(_erroredMutex);
+ boost::lock_guard<boost::mutex> lck(_erroredMutex);
return _errored;
}
private:
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 069af0ee7c7..9af842f3e6e 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -241,7 +241,7 @@ namespace mongo {
}
void BenchRunState::waitForState(State awaitedState) {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
switch ( awaitedState ) {
case BRS_RUNNING:
@@ -265,7 +265,7 @@ namespace mongo {
}
void BenchRunState::assertFinished() {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
verify(0 == _numUnstartedWorkers + _numActiveWorkers);
}
@@ -274,7 +274,7 @@ namespace mongo {
}
void BenchRunState::onWorkerStarted() {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
verify( _numUnstartedWorkers > 0 );
--_numUnstartedWorkers;
++_numActiveWorkers;
@@ -284,7 +284,7 @@ namespace mongo {
}
void BenchRunState::onWorkerFinished() {
- boost::mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::mutex> lk(_mutex);
verify( _numActiveWorkers > 0 );
--_numActiveWorkers;
if (_numActiveWorkers + _numUnstartedWorkers == 0) {
@@ -761,7 +761,7 @@ namespace mongo {
_config(config) {
_oid.init();
- boost::mutex::scoped_lock lk(_staticMutex);
+ boost::lock_guard<boost::mutex> lk(_staticMutex);
_activeRuns[_oid] = this;
}
@@ -825,7 +825,7 @@ namespace mongo {
}
{
- boost::mutex::scoped_lock lk(_staticMutex);
+ boost::lock_guard<boost::mutex> lk(_staticMutex);
_activeRuns.erase( _oid );
}
}
@@ -836,7 +836,7 @@ namespace mongo {
}
BenchRunner* BenchRunner::get( OID oid ) {
- boost::mutex::scoped_lock lk(_staticMutex);
+ boost::lock_guard<boost::mutex> lk(_staticMutex);
return _activeRuns[ oid ];
}
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 96f3c7bbdd3..48ff2dd3c36 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -92,18 +92,18 @@ namespace mongo {
ProgramOutputMultiplexer programOutputLogger;
bool ProgramRegistry::isPortRegistered( int port ) const {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
return _ports.count( port ) == 1;
}
ProcessId ProgramRegistry::pidForPort( int port ) const {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( isPortRegistered( port ) );
return _ports.find( port )->second.first;
}
int ProgramRegistry::portForPid(ProcessId pid) const {
- boost::recursive_mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::recursive_mutex> lk(_mutex);
for (map<int, pair<ProcessId, int> >::const_iterator it = _ports.begin();
it != _ports.end(); ++it)
{
@@ -114,13 +114,13 @@ namespace mongo {
}
void ProgramRegistry::registerPort( int port, ProcessId pid, int output ) {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( !isPortRegistered( port ) );
_ports.insert( make_pair( port, make_pair( pid, output ) ) );
}
void ProgramRegistry::deletePort( int port ) {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
if ( !isPortRegistered( port ) ) {
return;
}
@@ -129,7 +129,7 @@ namespace mongo {
}
void ProgramRegistry::getRegisteredPorts( vector<int> &ports ) {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
for( map<int,pair<ProcessId,int> >::const_iterator i = _ports.begin(); i != _ports.end();
++i ) {
ports.push_back( i->first );
@@ -137,18 +137,18 @@ namespace mongo {
}
bool ProgramRegistry::isPidRegistered( ProcessId pid ) const {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
return _pids.count( pid ) == 1;
}
void ProgramRegistry::registerPid( ProcessId pid, int output ) {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
verify( !isPidRegistered( pid ) );
_pids.insert( make_pair( pid, output ) );
}
void ProgramRegistry::deletePid(ProcessId pid) {
- boost::recursive_mutex::scoped_lock lk(_mutex);
+ boost::lock_guard<boost::recursive_mutex> lk(_mutex);
if (!isPidRegistered(pid)) {
int port = portForPid(pid);
if (port < 0) return;
@@ -160,7 +160,7 @@ namespace mongo {
}
void ProgramRegistry::getRegisteredPids( vector<ProcessId> &pids ) {
- boost::recursive_mutex::scoped_lock lk( _mutex );
+ boost::lock_guard<boost::recursive_mutex> lk( _mutex );
for( map<ProcessId,int>::const_iterator i = _pids.begin(); i != _pids.end(); ++i ) {
pids.push_back( i->first );
}
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index 1e6de16c715..67eb565c37d 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -97,18 +97,6 @@ namespace mongo {
}
}
- class try_lock : boost::noncopyable {
- public:
- try_lock( mongo::mutex &m , int millis = 0 )
- : _l( m.boost() , incxtimemillis( millis ) ) ,
- ok( _l.owns_lock() )
- { }
- private:
- boost::timed_mutex::scoped_timed_lock _l;
- public:
- const bool ok;
- };
-
class scoped_lock : boost::noncopyable {
public:
scoped_lock( mongo::mutex &m ) :
@@ -116,9 +104,9 @@ namespace mongo {
}
~scoped_lock() {
}
- boost::timed_mutex::scoped_lock &boost() { return _l; }
+ boost::unique_lock<boost::timed_mutex>& boost() { return _l; }
private:
- boost::timed_mutex::scoped_lock _l;
+ boost::unique_lock<boost::timed_mutex> _l;
};
private:
boost::timed_mutex &boost() { return *_m; }
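
As a sanity check on the mutex.h rewrite above: in the Boost versions of that era the nested typedef and the spelled-out template are the same type, which is what made the sed pass a pure respelling (this is an assumption about the build's Boost headers, not an assertion from the tree):

    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>
    #include <type_traits>

    // Compile-time check (assuming 1.5x-era Boost, where the nested
    // scoped_lock typedef still exists): boost::mutex::scoped_lock is an
    // alias for boost::unique_lock<boost::mutex>, so replacing the nested
    // name with the template spelling changes nothing at runtime.
    static_assert(std::is_same<boost::mutex::scoped_lock,
                               boost::unique_lock<boost::mutex> >::value,
                  "nested scoped_lock aliases unique_lock");
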
diff --git a/src/mongo/util/concurrency/qlock.h b/src/mongo/util/concurrency/qlock.h
index 445afbeb61c..d25da9be74b 100644
--- a/src/mongo/util/concurrency/qlock.h
+++ b/src/mongo/util/concurrency/qlock.h
@@ -180,7 +180,7 @@ namespace mongo {
// "i will be reading. i promise to coordinate my activities with w's as i go with more
// granular locks."
inline void QLock::lock_r() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
while( !r_legal() ) {
r.c.wait(m);
}
@@ -190,7 +190,7 @@ namespace mongo {
// "i will be writing. i promise to coordinate my activities with w's and r's as i go with more
// granular locks."
inline void QLock::lock_w() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
while( !w_legal() ) {
w.c.wait(m);
}
@@ -200,7 +200,7 @@ namespace mongo {
// "i will be reading. i will coordinate with no one. you better stop them if they
// are writing."
inline void QLock::lock_R() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
while( ! R_legal() ) {
R.c.wait(m);
}
@@ -209,7 +209,7 @@ namespace mongo {
inline bool QLock::lock_R_try(int millis) {
unsigned long long end = curTimeMillis64() + millis;
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
while( !R_legal() && curTimeMillis64() < end ) {
R.c.timed_wait(m, boost::posix_time::milliseconds(millis));
}
@@ -222,7 +222,7 @@ namespace mongo {
inline bool QLock::lock_W_try(int millis) {
unsigned long long end = curTimeMillis64() + millis;
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
++numPendingGlobalWrites;
while (!W_legal() && curTimeMillis64() < end) {
@@ -242,7 +242,7 @@ namespace mongo {
// downgrade from W state to R state
inline void QLock::W_to_R() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert(16203, W.n == 1);
fassert(16204, R.n == 0);
fassert(16205, U.n == 0);
@@ -262,7 +262,7 @@ namespace mongo {
// NOTE: ONLY CALL THIS FUNCTION ON A THREAD THAT GOT TO R BY CALLING W_to_R(), OR
// YOU MAY DEADLOCK WITH THREADS LEAVING THE X STATE.
inline void QLock::R_to_W() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert(16206, R.n > 0);
fassert(16207, W.n == 0);
fassert(16208, U.n == 0);
@@ -286,7 +286,7 @@ namespace mongo {
}
inline bool QLock::w_to_X() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert( 16212, w.n > 0 );
@@ -315,7 +315,7 @@ namespace mongo {
}
inline void QLock::X_to_w() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert( 16219, W.n == 0 );
fassert( 16220, R.n == 0 );
@@ -338,25 +338,25 @@ namespace mongo {
W.n++;
}
inline void QLock::lock_W() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
_lock_W();
}
inline void QLock::unlock_r() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert(16137, r.n > 0);
--r.n;
notifyWeUnlocked('r');
}
inline void QLock::unlock_w() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert(16138, w.n > 0);
--w.n;
notifyWeUnlocked('w');
}
inline void QLock::unlock_R() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
_unlock_R();
}
@@ -367,7 +367,7 @@ namespace mongo {
}
inline void QLock::unlock_W() {
- boost::mutex::scoped_lock lk(m);
+ boost::lock_guard<boost::mutex> lk(m);
fassert(16140, W.n == 1);
--W.n;
notifyWeUnlocked('W');
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index 44793197daf..efec9b50ee2 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -80,7 +80,7 @@ namespace mongo {
}
Status TicketHolder::resize(int newSize) {
- boost::mutex::scoped_lock lk(_resizeMutex);
+ boost::lock_guard<boost::mutex> lk(_resizeMutex);
if (newSize < 5)
return Status(ErrorCodes::BadValue,
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index d4bc2f75234..dcad2992d02 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -32,7 +32,7 @@
#include "mongo/util/signal_handlers_synchronous.h"
-#include <boost/thread/mutex.hpp>
+#include <boost/thread.hpp>
#include <exception>
#include <iostream>
#include <memory>
@@ -129,7 +129,7 @@ namespace {
// this will be called in certain c++ error cases, for example if there are two active
// exceptions
void myTerminate() {
- boost::mutex::scoped_lock lk(streamMutex);
+ boost::lock_guard<boost::mutex> lk(streamMutex);
// In c++11 we can recover the current exception to print it.
if (std::exception_ptr eptr = std::current_exception()) {
@@ -183,7 +183,7 @@ namespace {
}
void abruptQuit(int signalNum) {
- boost::mutex::scoped_lock lk(streamMutex);
+ boost::lock_guard<boost::mutex> lk(streamMutex);
printSignalAndBacktrace(signalNum);
// Don't go through normal shutdown procedure. It may make things worse.
@@ -222,7 +222,7 @@ namespace {
#else
void abruptQuitWithAddrSignal( int signalNum, siginfo_t *siginfo, void * ) {
- boost::mutex::scoped_lock lk(streamMutex);
+ boost::lock_guard<boost::mutex> lk(streamMutex);
const char* action = (signalNum == SIGSEGV || signalNum == SIGBUS) ? "access" : "operation";
mallocFreeOStream << "Invalid " << action << " at address: " << siginfo->si_addr;
@@ -275,7 +275,7 @@ namespace {
}
void reportOutOfMemoryErrorAndExit() {
- boost::mutex::scoped_lock lk(streamMutex);
+ boost::lock_guard<boost::mutex> lk(streamMutex);
printStackTrace(mallocFreeOStream << "out of memory.\n");
writeMallocFreeStreamToLog();
quickExit(EXIT_ABRUPT);
diff --git a/src/mongo/util/tcmalloc_server_status_section.cpp b/src/mongo/util/tcmalloc_server_status_section.cpp
index ab12f6c98b2..8b5ca0ba649 100644
--- a/src/mongo/util/tcmalloc_server_status_section.cpp
+++ b/src/mongo/util/tcmalloc_server_status_section.cpp
@@ -72,7 +72,7 @@ namespace {
// We synchronize as the tcmalloc central list uses a spinlock, and we can cause a really
// terrible runaway if we're not careful.
- boost::mutex::scoped_lock lk(tcmallocCleanupLock);
+ boost::lock_guard<boost::mutex> lk(tcmallocCleanupLock);
MallocExtension::instance()->MarkThreadIdle();
MallocExtension::instance()->MarkThreadBusy();
}