author    Adam Midvidy <amidvidy@gmail.com>  2015-06-17 10:27:13 -0400
committer Adam Midvidy <amidvidy@gmail.com>  2015-06-17 13:32:39 -0400
commit    449e0f2b47e32060433cb6f68d967ea53c8573d1 (patch)
tree      6e857f03a636b34ad8710e0b3e35a9115523483b /src/mongo/db/repl
parent    cbc69e7dcb875b35e161875317bca701b48c770c (diff)
SERVER-18723 boost -> stdx for mutex, unique_lock, and lock_guard
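
The stdx names are intended as drop-in aliases for the standard-library types, so every change below is a mechanical substitution at the call site. A minimal sketch of what such an alias header can look like follows; the real mongo/stdx/mutex.h is not part of this diff, so the using-declarations below are an assumption about its shape, not a copy of it.

    // Hedged sketch of an alias header in the spirit of "mongo/stdx/mutex.h".
    // Assumption: the stdx names simply re-export the std equivalents.
    #pragma once

    #include <mutex>

    namespace mongo {
    namespace stdx {

    using ::std::mutex;        // stdx::mutex
    using ::std::lock_guard;   // stdx::lock_guard<M>
    using ::std::unique_lock;  // stdx::unique_lock<M>
    using ::std::defer_lock;   // stdx::defer_lock (used in one hunk below)

    }  // namespace stdx
    }  // namespace mongo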
Diffstat (limited to 'src/mongo/db/repl')
 src/mongo/db/repl/bgsync.cpp                                      |  42
 src/mongo/db/repl/bgsync.h                                        |  12
 src/mongo/db/repl/check_quorum_for_config_change_test.cpp         |   6
 src/mongo/db/repl/oplog.cpp                                       |   6
 src/mongo/db/repl/replication_coordinator_external_state_impl.cpp |   4
 src/mongo/db/repl/replication_coordinator_external_state_impl.h   |   4
 src/mongo/db/repl/replication_coordinator_external_state_mock.cpp |   8
 src/mongo/db/repl/replication_coordinator_external_state_mock.h   |   4
 src/mongo/db/repl/replication_coordinator_impl.cpp                | 110
 src/mongo/db/repl/replication_coordinator_impl.h                  |  10
 src/mongo/db/repl/replication_coordinator_impl_elect.cpp          |   2
 src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp       |   2
 src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp      |  14
 src/mongo/db/repl/replication_executor.cpp                        |  42
 src/mongo/db/repl/replication_executor.h                          |   8
 src/mongo/db/repl/reporter.cpp                                    |  14
 src/mongo/db/repl/reporter.h                                      |   2
 src/mongo/db/repl/sync_source_feedback.cpp                        |  14
 src/mongo/db/repl/sync_source_feedback.h                          |   4
 src/mongo/db/repl/task_runner.cpp                                 |  18
 src/mongo/db/repl/task_runner.h                                   |   4
 21 files changed, 165 insertions(+), 165 deletions(-)
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index a9c5e068bc7..5317ab36305 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -68,7 +68,7 @@ namespace {
MONGO_FP_DECLARE(rsBgSyncProduce);
BackgroundSync* BackgroundSync::s_instance = 0;
- boost::mutex BackgroundSync::s_mutex;
+ stdx::mutex BackgroundSync::s_mutex;
//The number and time spent reading batches off the network
static TimerStats getmoreReplStats;
@@ -119,7 +119,7 @@ namespace {
}
BackgroundSync* BackgroundSync::get() {
- boost::unique_lock<boost::mutex> lock(s_mutex);
+ stdx::unique_lock<stdx::mutex> lock(s_mutex);
if (s_instance == NULL && !inShutdown()) {
s_instance = new BackgroundSync();
}
@@ -127,7 +127,7 @@ namespace {
}
void BackgroundSync::shutdown() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
// Clear the buffer in case the producerThread is waiting in push() due to a full queue.
invariant(inShutdown());
@@ -140,7 +140,7 @@ namespace {
}
void BackgroundSync::notify(OperationContext* txn) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
// If all ops in the buffer have been applied, unblock waitForRepl (if it's waiting)
if (_buffer.empty()) {
@@ -206,7 +206,7 @@ namespace {
// this oplog reader does not do a handshake because we don't want the server it's syncing
// from to track how far it has synced
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
@@ -232,7 +232,7 @@ namespace {
// find a target to sync from the last optime fetched
OpTime lastOpTimeFetched;
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
_syncSourceHost = HostAndPort();
}
@@ -240,7 +240,7 @@ namespace {
_syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
// no server found
if (_syncSourceReader.getHost().empty()) {
lock.unlock();
@@ -311,7 +311,7 @@ namespace {
// If there is still no data from upstream, check a few more things
// and then loop back for another pass at getting more data
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (_pause) {
return;
}
@@ -341,7 +341,7 @@ namespace {
opsReadStats.increment();
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_appliedBuffer = false;
}
@@ -354,7 +354,7 @@ namespace {
_buffer.push(o);
{
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_lastFetchedHash = o["h"].numberLong();
_lastOpTimeFetched = extractOpTime(o);
LOG(3) << "lastOpTimeFetched: " << _lastOpTimeFetched;
@@ -461,17 +461,17 @@ namespace {
}
HostAndPort BackgroundSync::getSyncTarget() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _syncSourceHost;
}
void BackgroundSync::clearSyncTarget() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_syncSourceHost = HostAndPort();
}
void BackgroundSync::stop() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_pause = true;
_syncSourceHost = HostAndPort();
@@ -485,7 +485,7 @@ namespace {
massert(16235, "going to start syncing, but buffer is not empty", _buffer.empty());
long long updatedLastAppliedHash = _readLastAppliedHash(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_pause = false;
// reset _last fields with current oplog data
@@ -498,14 +498,14 @@ namespace {
}
void BackgroundSync::waitUntilPaused() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while (!_pause) {
_pausedCondition.wait(lock);
}
}
long long BackgroundSync::getLastAppliedHash() const {
- boost::lock_guard<boost::mutex> lck(_mutex);
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
return _lastAppliedHash;
}
@@ -514,13 +514,13 @@ namespace {
}
void BackgroundSync::setLastAppliedHash(long long newHash) {
- boost::lock_guard<boost::mutex> lck(_mutex);
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
_lastAppliedHash = newHash;
}
void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
long long result = _readLastAppliedHash(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_lastAppliedHash = result;
}
@@ -558,17 +558,17 @@ namespace {
}
bool BackgroundSync::getInitialSyncRequestedFlag() {
- boost::lock_guard<boost::mutex> lock(_initialSyncMutex);
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
return _initialSyncRequestedFlag;
}
void BackgroundSync::setInitialSyncRequestedFlag(bool value) {
- boost::lock_guard<boost::mutex> lock(_initialSyncMutex);
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
_initialSyncRequestedFlag = value;
}
void BackgroundSync::pushTestOpToBuffer(const BSONObj& op) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
_buffer.push(op);
}
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index ad16f4cdc99..ee14d380fe9 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -29,12 +29,12 @@
#pragma once
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
-#include "mongo/util/queue.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/repl/oplogreader.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/db/jsobj.h"
+#include "mongo/stdx/mutex.h"
+#include "mongo/util/queue.h"
namespace mongo {
@@ -133,14 +133,14 @@ namespace repl {
private:
static BackgroundSync *s_instance;
// protects creation of s_instance
- static boost::mutex s_mutex;
+ static stdx::mutex s_mutex;
// Production thread
BlockingQueue<BSONObj> _buffer;
OplogReader _syncSourceReader;
// _mutex protects all of the class variables except _syncSourceReader and _buffer
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
OpTime _lastOpTimeFetched;
@@ -183,7 +183,7 @@ namespace repl {
// bool for indicating resync need on this node and the mutex that protects it
// The resync command sets this flag; the Applier thread observes and clears it.
bool _initialSyncRequestedFlag;
- boost::mutex _initialSyncMutex;
+ stdx::mutex _initialSyncMutex;
// This setting affects the Applier prefetcher behavior.
IndexPrefetchConfig _indexPrefetchConfig;
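
Note that bgsync.h deliberately keeps <boost/thread/condition.hpp> while its mutexes move to stdx. That mix compiles because boost::condition is a typedef of boost::condition_variable_any, which can wait on any Lockable, including a stdx::unique_lock<stdx::mutex>. A minimal sketch of the pairing, with std names standing in for the stdx aliases (the alias header is not shown in this diff):

    // Sketch only: a boost condition variable waiting on a std/stdx lock.
    #include <boost/thread/condition.hpp>
    #include <mutex>

    std::mutex m;           // stand-in for stdx::mutex
    boost::condition cond;  // typedef of boost::condition_variable_any
    bool paused = false;

    void waitUntilPausedSketch() {
        std::unique_lock<std::mutex> lock(m);
        while (!paused) {
            cond.wait(lock);  // condition_variable_any::wait takes any lock
        }
    }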
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index e8e210cb78c..8432769a99c 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -86,7 +86,7 @@ namespace {
std::unique_ptr<boost::thread> _executorThread;
std::unique_ptr<boost::thread> _quorumCheckThread;
Status _quorumCheckStatus;
- boost::mutex _mutex;
+ stdx::mutex _mutex;
bool _isQuorumCheckDone;
};
@@ -123,13 +123,13 @@ namespace {
}
bool CheckQuorumTest::isQuorumCheckDone() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _isQuorumCheckDone;
}
void CheckQuorumTest::_runQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
_quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_isQuorumCheckDone = true;
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index a2bebd9facb..bc394af7b2f 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -130,7 +130,7 @@ namespace {
const char* ns,
ReplicationCoordinator* replCoord,
const char* opstr) {
- boost::lock_guard<boost::mutex> lk(newOpMutex);
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
Timestamp ts = getNextGlobalTimestamp();
newTimestampNotifier.notify_all();
@@ -870,7 +870,7 @@ namespace {
}
void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime) {
- boost::unique_lock<boost::mutex> lk(newOpMutex);
+ stdx::unique_lock<stdx::mutex> lk(newOpMutex);
while (referenceTime == getLastSetTimestamp()) {
if (!newTimestampNotifier.timed_wait(lk, boost::posix_time::seconds(1)))
@@ -879,7 +879,7 @@ namespace {
}
void setNewTimestamp(const Timestamp& newTime) {
- boost::lock_guard<boost::mutex> lk(newOpMutex);
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
setGlobalTimestamp(newTime);
newTimestampNotifier.notify_all();
}
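
The waitUpToOneSecondForTimestampChange() hunk shows the same mixing with a timed wait: boost's condition_variable_any accepts the stdx lock in timed_wait() unchanged. If the notifier itself were later converted, the equivalent standard-library form would use wait_for. A hedged sketch, with std names standing in for the stdx aliases and the predicate helper invented for illustration:

    // Sketch: the one-second wait written purely against the standard library,
    // assuming the boost::condition notifier eventually becomes a
    // std::condition_variable_any. Not what this commit does.
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex newOpMutex;
    std::condition_variable_any newTimestampNotifier;

    template <typename UnchangedPred>
    void waitUpToOneSecondSketch(UnchangedPred timestampUnchanged) {
        std::unique_lock<std::mutex> lk(newOpMutex);
        while (timestampUnchanged()) {
            // timed_wait(lk, seconds(1)) returning false maps to a timeout here.
            if (newTimestampNotifier.wait_for(lk, std::chrono::seconds(1)) ==
                std::cv_status::timeout)
                return;
        }
    }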
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 942681cedf6..f1d2232988f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -85,7 +85,7 @@ namespace {
ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {}
void ReplicationCoordinatorExternalStateImpl::startThreads() {
- boost::lock_guard<boost::mutex> lk(_threadMutex);
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
if (_startedThreads) {
return;
}
@@ -104,7 +104,7 @@ namespace {
}
void ReplicationCoordinatorExternalStateImpl::shutdown() {
- boost::lock_guard<boost::mutex> lk(_threadMutex);
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
if (_startedThreads) {
log() << "Stopping replication applier threads";
_syncSourceFeedback.shutdown();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 62edea65c70..8f5cc27d466 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -69,7 +69,7 @@ namespace repl {
private:
// Guards starting threads and setting _startedThreads
- boost::mutex _threadMutex;
+ stdx::mutex _threadMutex;
// True when the threads have been started
bool _startedThreads;
@@ -89,7 +89,7 @@ namespace repl {
std::unique_ptr<boost::thread> _producerThread;
// Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- boost::mutex _nextThreadIdMutex;
+ stdx::mutex _nextThreadIdMutex;
// Number used to uniquely name threads.
long long _nextThreadId;
};
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index abad48703af..2ab0103f6b1 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -93,7 +93,7 @@ namespace repl {
OperationContext* txn,
const BSONObj& config) {
{
- boost::unique_lock<boost::mutex> lock(_shouldHangConfigMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
while (_storeLocalConfigDocumentShouldHang) {
_shouldHangConfigCondVar.wait(lock);
}
@@ -120,7 +120,7 @@ namespace repl {
OperationContext* txn,
const LastVote& lastVote) {
{
- boost::unique_lock<boost::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
_shouldHangLastVoteCondVar.wait(lock);
}
@@ -156,7 +156,7 @@ namespace repl {
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentToHang(bool hang) {
- boost::unique_lock<boost::mutex> lock(_shouldHangConfigMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
_storeLocalConfigDocumentShouldHang = hang;
if (!hang) {
_shouldHangConfigCondVar.notify_all();
@@ -169,7 +169,7 @@ namespace repl {
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- boost::unique_lock<boost::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
_storeLocalLastVoteDocumentShouldHang = hang;
if (!hang) {
_shouldHangLastVoteCondVar.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index eca57e01c06..c5e2684d5a3 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -131,10 +131,10 @@ namespace repl {
Status _storeLocalConfigDocumentStatus;
Status _storeLocalLastVoteDocumentStatus;
// mutex and cond var for controlling storeLocalConfigDocument()'s hanging
- boost::mutex _shouldHangConfigMutex;
+ stdx::mutex _shouldHangConfigMutex;
boost::condition _shouldHangConfigCondVar;
// mutex and cond var for controlling storeLocalLastVoteDocument()'s hanging
- boost::mutex _shouldHangLastVoteMutex;
+ stdx::mutex _shouldHangLastVoteMutex;
boost::condition _shouldHangLastVoteCondVar;
bool _storeLocalConfigDocumentShouldHang;
bool _storeLocalLastVoteDocumentShouldHang;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 23e15702299..19bfba131f4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -79,7 +79,7 @@ namespace repl {
namespace {
using executor::NetworkInterface;
- void lockAndCall(boost::unique_lock<boost::mutex>* lk, const stdx::function<void ()>& fn) {
+ void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const stdx::function<void ()>& fn) {
if (!lk->owns_lock()) {
lk->lock();
}
@@ -228,14 +228,14 @@ namespace {
ReplicationCoordinatorImpl::~ReplicationCoordinatorImpl() {}
void ReplicationCoordinatorImpl::waitForStartUpComplete() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
}
}
ReplicaSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _rsConfig;
}
@@ -337,7 +337,7 @@ namespace {
}
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigStartingUp);
const PostMemberStateUpdateAction action =
_setCurrentRSConfig_inlock(localConfig, myIndex.getValue());
@@ -352,7 +352,7 @@ namespace {
void ReplicationCoordinatorImpl::startReplication(OperationContext* txn) {
if (!isReplEnabled()) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
return;
}
@@ -360,7 +360,7 @@ namespace {
{
OID rid = _externalState->ensureMe(txn);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
fassert(18822, !_inShutdown);
_setConfigState_inlock(kConfigStartingUp);
_myRID = rid;
@@ -381,7 +381,7 @@ namespace {
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigUninitialized);
}
@@ -399,7 +399,7 @@ namespace {
}
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
fassert(28533, !_inShutdown);
_inShutdown = true;
if (_rsConfigState == kConfigPreStart) {
@@ -430,7 +430,7 @@ namespace {
}
MemberState ReplicationCoordinatorImpl::getMemberState() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _getMemberState_inlock();
}
@@ -439,7 +439,7 @@ namespace {
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_rsConfig.isInitialized());
uassert(28524,
"Node not a member of the current set configuration",
@@ -530,7 +530,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->setFollowerMode(newState.s);
const PostMemberStateUpdateAction action =
@@ -542,7 +542,7 @@ namespace {
}
bool ReplicationCoordinatorImpl::isWaitingForApplierToDrain() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _isWaitingForDrainToComplete;
}
@@ -569,7 +569,7 @@ namespace {
// external writes will be processed. This is important so that a new temp collection isn't
// introduced on the new primary before we drop all the temp collections.
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_isWaitingForDrainToComplete) {
return;
}
@@ -693,7 +693,7 @@ namespace {
Status ReplicationCoordinatorImpl::setLastOptimeForSlave(const OID& rid,
const Timestamp& ts) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
massert(28576,
"Received an old style replication progress update, which is only used for Master/"
"Slave replication now, but this node is not using Master/Slave replication. "
@@ -731,18 +731,18 @@ namespace {
}
void ReplicationCoordinatorImpl::setMyLastOptime(const OpTime& opTime) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
_setMyLastOptime_inlock(&lock, opTime, false);
}
void ReplicationCoordinatorImpl::resetMyLastOptime() {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
// Reset to uninitialized OpTime
_setMyLastOptime_inlock(&lock, OpTime(), true);
}
void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(
- boost::unique_lock<boost::mutex>* lock, const OpTime& opTime, bool isRollbackAllowed) {
+ stdx::unique_lock<stdx::mutex>* lock, const OpTime& opTime, bool isRollbackAllowed) {
invariant(lock->owns_lock());
SlaveInfo* mySlaveInfo = &_slaveInfo[_getMyIndexInSlaveInfo_inlock()];
invariant(isRollbackAllowed || mySlaveInfo->opTime <= opTime);
@@ -768,7 +768,7 @@ namespace {
}
OpTime ReplicationCoordinatorImpl::getMyLastOptime() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyLastOptime_inlock();
}
@@ -797,7 +797,7 @@ namespace {
#endif
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
while (ts > _getMyLastOptime_inlock()) {
Status interruptedStatus = txn->checkForInterruptNoAssert();
@@ -853,7 +853,7 @@ namespace {
Status ReplicationCoordinatorImpl::setLastOptime_forTest(long long cfgVer,
long long memberId,
const OpTime& opTime) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
const UpdatePositionArgs::UpdateInfo update(OID(), opTime, cfgVer, memberId);
@@ -925,7 +925,7 @@ namespace {
}
void ReplicationCoordinatorImpl::interrupt(unsigned opId) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
it != _replicationWaiterList.end(); ++it) {
WaiterInfo* info = *it;
@@ -949,7 +949,7 @@ namespace {
}
void ReplicationCoordinatorImpl::interruptAll() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
it != _replicationWaiterList.end(); ++it) {
WaiterInfo* info = *it;
@@ -1045,7 +1045,7 @@ namespace {
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _awaitReplication_inlock(&timer, &lock, txn, opTime, writeConcern);
}
@@ -1054,7 +1054,7 @@ namespace {
OperationContext* txn,
const WriteConcernOptions& writeConcern) {
Timer timer;
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
return _awaitReplication_inlock(
&timer,
&lock,
@@ -1065,7 +1065,7 @@ namespace {
ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::_awaitReplication_inlock(
const Timer* timer,
- boost::unique_lock<boost::mutex>* lock,
+ stdx::unique_lock<stdx::mutex>* lock,
OperationContext* txn,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
@@ -1295,7 +1295,7 @@ namespace {
this,
stdx::placeholders::_1));
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
const PostMemberStateUpdateAction action =
_updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
@@ -1353,7 +1353,7 @@ namespace {
bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
if (_settings.usingReplSets()) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (getReplicationMode() == modeReplSet && _getMemberState_inlock().primary()) {
return true;
}
@@ -1445,7 +1445,7 @@ namespace {
// always enforce on local
return false;
}
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (getReplicationMode() != modeReplSet) {
return false;
}
@@ -1463,12 +1463,12 @@ namespace {
}
OID ReplicationCoordinatorImpl::getElectionId() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _electionId;
}
OID ReplicationCoordinatorImpl::getMyRID() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyRID_inlock();
}
@@ -1477,7 +1477,7 @@ namespace {
}
int ReplicationCoordinatorImpl::getMyId() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _getMyId_inlock();
}
@@ -1488,7 +1488,7 @@ namespace {
bool ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand(
BSONObjBuilder* cmdBuilder) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
invariant(_rsConfig.isInitialized());
// do not send updates if we have been removed from the config
if (_selfIndex == -1) {
@@ -1572,7 +1572,7 @@ namespace {
}
void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
BSONArrayBuilder replicationProgress(result->subarrayStart("replicationProgress"));
{
for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin();
@@ -1594,12 +1594,12 @@ namespace {
}
ReplicaSetConfig ReplicationCoordinatorImpl::getConfig() const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _rsConfig;
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
result->append("config", _rsConfig.toBSON());
}
@@ -1657,7 +1657,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_getMemberState_inlock().primary()) {
*result = Status(ErrorCodes::NotSecondary, "primaries can't modify maintenance mode");
return;
@@ -1749,7 +1749,7 @@ namespace {
Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs& args,
ReplSetHeartbeatResponse* response) {
{
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
@@ -1808,7 +1808,7 @@ namespace {
log() << "replSetReconfig admin command received from client";
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
@@ -1931,7 +1931,7 @@ namespace {
const ReplicaSetConfig& newConfig,
int myIndex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigReconfiguring);
invariant(_rsConfig.isInitialized());
const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
@@ -1944,7 +1944,7 @@ namespace {
BSONObjBuilder* resultObj) {
log() << "replSetInitiate admin command received from client";
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_settings.usingReplSets()) {
return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
}
@@ -2035,7 +2035,7 @@ namespace {
const ReplicaSetConfig& newConfig,
int myIndex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigInitiating);
invariant(!_rsConfig.isInitialized());
const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
@@ -2120,7 +2120,7 @@ namespace {
_externalState->clearShardingState();
break;
case kActionWinElection: {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_electionId = OID::gen();
_topCoord->processWinElection(_electionId, getNextGlobalTimestamp());
_isWaitingForDrainToComplete = true;
@@ -2138,13 +2138,13 @@ namespace {
}
Status ReplicationCoordinatorImpl::processReplSetGetRBID(BSONObjBuilder* resultObj) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
resultObj->append("rbid", _rbid);
return Status::OK();
}
void ReplicationCoordinatorImpl::incrementRollbackID() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
++_rbid;
}
@@ -2263,7 +2263,7 @@ namespace {
Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(
const UpdatePositionArgs& updates, long long* configVersion) {
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
Status status = Status::OK();
bool somethingChanged = false;
for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
@@ -2290,7 +2290,7 @@ namespace {
const HandshakeArgs& handshake) {
LOG(2) << "Received handshake " << handshake.toBSON();
- boost::unique_lock<boost::mutex> lock(_mutex);
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
if (getReplicationMode() != modeMasterSlave) {
return Status(ErrorCodes::IllegalOperation,
@@ -2313,7 +2313,7 @@ namespace {
}
bool ReplicationCoordinatorImpl::buildsIndexes() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_selfIndex == -1) {
return true;
}
@@ -2323,7 +2323,7 @@ namespace {
std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op) {
std::vector<HostAndPort> hosts;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
for (size_t i = 0; i < _slaveInfo.size(); ++i) {
const SlaveInfo& slaveInfo = _slaveInfo[i];
if (slaveInfo.opTime < op) {
@@ -2340,7 +2340,7 @@ namespace {
}
std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
@@ -2359,7 +2359,7 @@ namespace {
Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
const WriteConcernOptions& writeConcern) const {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
@@ -2384,7 +2384,7 @@ namespace {
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
@@ -2489,7 +2489,7 @@ namespace {
else {
lastOpTime = lastOpTimeStatus.getValue();
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_setMyLastOptime_inlock(&lk, lastOpTime, true);
_externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
}
@@ -2551,7 +2551,7 @@ namespace {
}
OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
return _lastCommittedOpTime;
}
@@ -2602,7 +2602,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->processReplSetRequestVotes(args, response, getMyLastOptime());
*result = Status::OK();
}
@@ -2659,7 +2659,7 @@ namespace {
Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) {
{
- boost::lock_guard<boost::mutex> lock(_mutex);
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index f49e2dde89e..2a5f11b410a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -29,14 +29,12 @@
#pragma once
#include <boost/thread.hpp>
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <vector>
#include <memory>
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/repl/data_replicator.h"
#include "mongo/db/repl/member_state.h"
#include "mongo/db/repl/optime.h"
@@ -46,9 +44,11 @@
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/update_position_args.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/platform/unordered_set.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -499,7 +499,7 @@ namespace repl {
*/
ReplicationCoordinator::StatusAndDuration _awaitReplication_inlock(
const Timer* timer,
- boost::unique_lock<boost::mutex>* lock,
+ stdx::unique_lock<stdx::mutex>* lock,
OperationContext* txn,
const OpTime& opTime,
const WriteConcernOptions& writeConcern);
@@ -583,7 +583,7 @@ namespace repl {
* This function has the same rules for "opTime" as setMyLastOptime(), unless
* "isRollbackAllowed" is true.
*/
- void _setMyLastOptime_inlock(boost::unique_lock<boost::mutex>* lock,
+ void _setMyLastOptime_inlock(stdx::unique_lock<stdx::mutex>* lock,
const OpTime& opTime,
bool isRollbackAllowed);
@@ -910,7 +910,7 @@ namespace repl {
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable boost::mutex _mutex; // (S)
+ mutable stdx::mutex _mutex; // (S)
// Handles to actively queued heartbeats.
HeartbeatHandles _heartbeatHandles; // (X)
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index ae9282e001e..35f5fdf9f9d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -88,7 +88,7 @@ namespace {
invariant(!_freshnessChecker);
invariant(!_electCmdRunner);
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
switch (_rsConfigState) {
case kConfigSteady:
break;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 490ed4af6cf..686f7bbe5d3 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -91,7 +91,7 @@ namespace {
invariant(!_voteRequester);
invariant(!_freshnessChecker);
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
switch (_rsConfigState) {
case kConfigSteady:
break;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 63af8b2bc03..4afdb8594e8 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -170,7 +170,7 @@ namespace {
targetIndex >= 0 &&
hbStatusResponse.getValue().hasState() &&
hbStatusResponse.getValue().getState() != MemberState::RS_PRIMARY) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (hbStatusResponse.getValue().getConfigVersion() == _rsConfig.getConfigVersion()) {
_updateOpTimeFromHeartbeat_inlock(targetIndex,
hbStatusResponse.getValue().getOpTime());
@@ -209,7 +209,7 @@ namespace {
case HeartbeatResponseAction::NoAction:
// Update the cached member state if different than the current topology member state
if (_memberState != _topCoord->getMemberState()) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
const PostMemberStateUpdateAction postUpdateAction =
_updateMemberStateFromTopologyCoordinator_inlock();
lk.unlock();
@@ -295,7 +295,7 @@ namespace {
invariant(cbData.txn);
// TODO Add invariant that we've got global shared or global exclusive lock, when supported
// by lock manager.
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
_topCoord->stepDownIfPending();
const PostMemberStateUpdateAction action =
_updateMemberStateFromTopologyCoordinator_inlock();
@@ -304,7 +304,7 @@ namespace {
}
void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -360,7 +360,7 @@ namespace {
return;
}
fassert(18911, cbData.status);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -382,7 +382,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex, boost::defer_lock_t());
+ stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::defer_lock);
const StatusWith<int> myIndex = validateConfigForHeartbeatReconfig(
_externalState.get(),
@@ -459,7 +459,7 @@ namespace {
return;
}
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_rsConfigState == kConfigHBReconfiguring);
invariant(!_rsConfig.isInitialized() ||
_rsConfig.getConfigVersion() < newConfig.getConfigVersion());
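
One hunk above is slightly more than a type rename: boost spells deferred locking by constructing a tag object, boost::defer_lock_t(), while the standard library provides the ready-made constant std::defer_lock, which stdx::defer_lock is assumed to alias. A small sketch of the idiom (function name invented for illustration):

    // Sketch of the deferred-locking idiom from the hunk above.
    #include <mutex>

    std::mutex m;

    void deferredLockSketch(bool needsLock) {
        std::unique_lock<std::mutex> lk(m, std::defer_lock);  // associated, unlocked
        if (needsLock) {
            lk.lock();  // acquire only on the paths that require it
        }
    }  // destructor unlocks only if lk actually owns the mutex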
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 9866d08a451..d7c4103f9f5 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -74,7 +74,7 @@ namespace {
ReplicationExecutor::~ReplicationExecutor() {}
std::string ReplicationExecutor::getDiagnosticString() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _getDiagnosticString_inlock();
}
@@ -105,7 +105,7 @@ namespace {
std::pair<WorkItem, CallbackHandle> work;
while ((work = getWork()).first.callback.isValid()) {
{
- boost::lock_guard<boost::mutex> lk(_terribleExLockSyncMutex);
+ stdx::lock_guard<stdx::mutex> lk(_terribleExLockSyncMutex);
const Callback* callback = _getCallbackFromHandle(work.first.callback);
const Status inStatus = callback->_isCanceled ?
Status(ErrorCodes::CallbackCanceled, "Callback canceled") :
@@ -125,7 +125,7 @@ namespace {
// * drain all of the unsignaled events, sleepers, and ready queue, by running those
// callbacks with a "shutdown" or "canceled" status.
// * Signal all threads blocked in waitForEvent, and wait for them to return from that method.
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_inShutdown = true;
_readyQueue.splice(_readyQueue.end(), _dbWorkInProgressQueue);
@@ -145,7 +145,7 @@ namespace {
_dblockExclusiveLockTaskRunner.cancel();
_dblockTaskRunner.cancel();
_dblockWorkers.join();
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
invariant(_inShutdown);
invariant(_dbWorkInProgressQueue.empty());
invariant(_exclusiveLockInProgressQueue.empty());
@@ -174,7 +174,7 @@ namespace {
}
StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return makeEvent_inlock();
}
@@ -189,7 +189,7 @@ namespace {
}
void ReplicationExecutor::signalEvent(const EventHandle& eventHandle) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
signalEvent_inlock(eventHandle);
}
@@ -214,7 +214,7 @@ namespace {
StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::onEvent(
const EventHandle& eventHandle,
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
WorkQueue* queue = &_readyQueue;
Event* event = _getEventFromHandle(eventHandle);
if (!event->_isSignaled) {
@@ -268,7 +268,7 @@ namespace {
Callback* callback = _getCallbackFromHandle(cbHandle);
const WorkQueue::iterator iter = callback->_iter;
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -299,7 +299,7 @@ namespace {
else {
scheduledRequest.expirationDate = _networkInterface->now() + scheduledRequest.timeout;
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(
&_networkInProgressQueue,
stdx::bind(remoteCommandFailedEarly,
@@ -327,7 +327,7 @@ namespace {
StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWork(
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_networkInterface->signalWorkAvailable();
return enqueueWork_inlock(&_readyQueue, work);
}
@@ -336,7 +336,7 @@ namespace {
Date_t when,
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
WorkQueue temp;
StatusWith<CallbackHandle> cbHandle = enqueueWork_inlock(&temp, work);
if (!cbHandle.isOK())
@@ -359,7 +359,7 @@ namespace {
const NamespaceString& nss,
LockMode mode) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_dbWorkInProgressQueue,
work);
if (handle.isOK()) {
@@ -389,8 +389,8 @@ namespace {
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
- boost::mutex* terribleExLockSyncMutex) {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::mutex* terribleExLockSyncMutex) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_inShutdown)
return;
Callback* callback = _getCallbackFromHandle(cbHandle);
@@ -399,9 +399,9 @@ namespace {
_freeQueue.splice(_freeQueue.begin(), *workQueue, iter);
lk.unlock();
{
- std::unique_ptr<boost::lock_guard<boost::mutex> > terribleLock(
+ std::unique_ptr<stdx::lock_guard<stdx::mutex> > terribleLock(
terribleExLockSyncMutex ?
- new boost::lock_guard<boost::mutex>(*terribleExLockSyncMutex) :
+ new stdx::lock_guard<stdx::mutex>(*terribleExLockSyncMutex) :
nullptr);
// Only possible task runner error status is CallbackCanceled.
callback->_callbackFn(CallbackArgs(this,
@@ -420,7 +420,7 @@ namespace {
ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(
const CallbackFn& work) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_exclusiveLockInProgressQueue,
work);
if (handle.isOK()) {
@@ -444,7 +444,7 @@ namespace {
std::pair<ReplicationExecutor::WorkItem, ReplicationExecutor::CallbackHandle>
ReplicationExecutor::getWork() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (true) {
const Date_t now = _networkInterface->now();
Date_t nextWakeupDate = scheduleReadySleepers_inlock(now);
@@ -539,7 +539,7 @@ namespace {
}
void ReplicationExecutor::Event::waitUntilSignaled() {
- boost::unique_lock<boost::mutex> lk(_executor->_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
++_executor->_totalEventWaiters;
while (!_isSignaled) {
_isSignaledCondition.wait(lk);
@@ -549,7 +549,7 @@ namespace {
}
bool ReplicationExecutor::Event::isSignaled() {
- boost::lock_guard<boost::mutex> lk(_executor->_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_executor->_mutex);
return _isSignaled;
}
@@ -567,7 +567,7 @@ namespace {
ReplicationExecutor::Callback::~Callback() {}
void ReplicationExecutor::Callback::cancel() {
- boost::unique_lock<boost::mutex> lk(_executor->_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
_isCanceled = true;
if (_iter->isNetworkOperation) {
lk.unlock();
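
The hunk around terribleExLockSyncMutex preserves an unusual pattern worth noting: a lock_guard cannot be declared conditionally on the stack, so the code heap-allocates one behind a unique_ptr only when a mutex pointer was supplied. A stripped-down sketch of that pattern (function and parameter names invented for illustration):

    // Sketch: conditional scoped locking via unique_ptr<lock_guard>.
    #include <memory>
    #include <mutex>

    void runMaybeLocked(std::mutex* maybeMutex) {
        std::unique_ptr<std::lock_guard<std::mutex>> guard(
            maybeMutex ? new std::lock_guard<std::mutex>(*maybeMutex) : nullptr);
        // ... work that must hold *maybeMutex whenever one is provided ...
    }  // guard, if non-null, releases the mutex here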
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index 567cf46f67a..3e65beb0765 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -29,7 +29,6 @@
#pragma once
#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
#include <string>
#include <thread>
@@ -45,6 +44,7 @@
#include "mongo/platform/random.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/list.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -285,7 +285,7 @@ namespace repl {
const Status& taskRunnerStatus,
const CallbackHandle& cbHandle,
WorkQueue* workQueue,
- boost::mutex* terribleExLockSyncMutex);
+ stdx::mutex* terribleExLockSyncMutex);
/**
* Wrapper around TaskExecutor::getCallbackFromHandle that return an Event* instead of
@@ -304,8 +304,8 @@ namespace repl {
std::unique_ptr<executor::NetworkInterface> _networkInterface;
std::unique_ptr<StorageInterface> _storageInterface;
- boost::mutex _mutex;
- boost::mutex _terribleExLockSyncMutex;
+ stdx::mutex _mutex;
+ stdx::mutex _terribleExLockSyncMutex;
boost::condition_variable _noMoreWaitingThreads;
WorkQueue _freeQueue;
WorkQueue _readyQueue;
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index e72828d3758..c3d7bd79903 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -62,7 +62,7 @@ namespace repl {
}
void Reporter::cancel() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (!_active) {
return;
@@ -76,7 +76,7 @@ namespace repl {
}
void Reporter::wait() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_active) {
return;
}
@@ -86,7 +86,7 @@ namespace repl {
}
Status Reporter::trigger() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _schedule_inlock();
}
@@ -124,7 +124,7 @@ namespace repl {
}
void Reporter::_callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_status = rcbd.response.getStatus();
_active = false;
@@ -139,17 +139,17 @@ namespace repl {
}
Status Reporter::getStatus() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _status;
}
bool Reporter::isActive() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _active;
}
bool Reporter::willRunAgain() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _willRunAgain;
}
} // namespace repl
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index 4dbf86c4ed5..22d2bb1fa1a 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -105,7 +105,7 @@ namespace repl {
HostAndPort _target;
// Protects member data of this Reporter.
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
// Stores the most recent Status returned from the ReplicationExecutor.
Status _status;
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 960fd92ac6c..49c70c3c2b7 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -99,7 +99,7 @@ namespace repl {
}
void SyncSourceFeedback::forwardSlaveProgress() {
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
_cond.notify_all();
}
@@ -112,7 +112,7 @@ namespace repl {
}
BSONObjBuilder cmd;
{
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
// the command could not be created, likely because the node was removed from the set
if (!replCoord->prepareReplSetUpdatePositionCommand(&cmd)) {
return Status::OK();
@@ -150,7 +150,7 @@ namespace repl {
}
void SyncSourceFeedback::shutdown() {
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_shutdownSignaled = true;
_cond.notify_all();
}
@@ -161,7 +161,7 @@ namespace repl {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
while (true) { // breaks once _shutdownSignaled is true
{
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
while (!_positionChanged && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -188,13 +188,13 @@ namespace repl {
// fix connection if need be
if (target.empty()) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
if (!_connect(txn.get(), target)) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
continue;
}
@@ -202,7 +202,7 @@ namespace repl {
Status status = updateUpstream(txn.get());
if (!status.isOK()) {
sleepmillis(500);
- boost::unique_lock<boost::mutex> lock(_mtx);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
}
}
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index 40d22bddb5e..900019cd3e8 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -29,11 +29,11 @@
#pragma once
-#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include "mongo/client/constants.h"
#include "mongo/client/dbclientcursor.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -83,7 +83,7 @@ namespace repl {
// our connection to our sync target
std::unique_ptr<DBClientConnection> _connection;
// protects cond, _shutdownSignaled, and _positionChanged.
- boost::mutex _mtx;
+ stdx::mutex _mtx;
// used to alert our thread of changes which need to be passed up the chain
boost::condition _cond;
// used to indicate a position change which has not yet been pushed along
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index f1b54c295d0..421fa9c9fb5 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -85,7 +85,7 @@ namespace {
TaskRunner::~TaskRunner() {
try {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
if (!_active) {
return;
}
@@ -101,7 +101,7 @@ namespace {
}
std::string TaskRunner::getDiagnosticString() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
str::stream output;
output << "TaskRunner";
output << " scheduled tasks: " << _tasks.size();
@@ -111,14 +111,14 @@ namespace {
}
bool TaskRunner::isActive() const {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
return _active;
}
void TaskRunner::schedule(const Task& task) {
invariant(task);
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_tasks.push_back(task);
_condition.notify_all();
@@ -134,7 +134,7 @@ namespace {
}
void TaskRunner::cancel() {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_cancelRequested = true;
_condition.notify_all();
}
@@ -159,7 +159,7 @@ namespace {
// Release thread back to pool after disposing if no scheduled tasks in queue.
if (nextAction == NextAction::kDisposeOperationContext ||
nextAction == NextAction::kInvalid) {
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_tasks.empty()) {
_finishRunTasks_inlock();
return;
@@ -170,7 +170,7 @@ namespace {
std::list<Task> tasks;
{
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
tasks.swap(_tasks);
}
@@ -180,7 +180,7 @@ namespace {
"this task has been canceled by a previously invoked task"));
}
- boost::lock_guard<boost::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
_finishRunTasks_inlock();
}
@@ -191,7 +191,7 @@ namespace {
}
TaskRunner::Task TaskRunner::_waitForNextTask() {
- boost::unique_lock<boost::mutex> lk(_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
while (_tasks.empty() && !_cancelRequested) {
_condition.wait(lk);
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index fb7985df7ca..3aa875205d1 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -29,11 +29,11 @@
#pragma once
#include <boost/thread/condition.hpp>
-#include <boost/thread/mutex.hpp>
#include <list>
#include "mongo/base/disallow_copying.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -152,7 +152,7 @@ namespace repl {
CreateOperationContextFn _createOperationContext;
// Protects member data of this TaskRunner.
- mutable boost::mutex _mutex;
+ mutable stdx::mutex _mutex;
boost::condition _condition;