author     Ben Caimano <ben.caimano@mongodb.com>  2019-09-17 23:22:19 +0000
committer  evergreen <evergreen@mongodb.com>      2019-09-17 23:22:19 +0000
commit     bc11369435ca51e2ff6897433d00f6b909f6a25f (patch)
tree       251653ec8285d798b41846e343e7e414e80ff277 /src/mongo/s/client
parent     45aea2495306dd61fab46bd398735bb6aaf7b53a (diff)
download   mongo-bc11369435ca51e2ff6897433d00f6b909f6a25f.tar.gz
SERVER-42165 Replace uses of stdx::mutex with mongo::Mutex
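
The change is mechanical: each stdx::mutex member becomes a mongo::Mutex initialized through the MONGO_MAKE_LATCH macro with a diagnostic name, and lock guards are re-templated on Latch. Below is a minimal sketch of the pattern, not taken from the patch; it assumes, as the hunks further down suggest, that "mongo/platform/mutex.h" provides Mutex, Latch, and MONGO_MAKE_LATCH, and that the stdx::lock_guard alias remains available. The Counter class is hypothetical.

#include "mongo/platform/mutex.h"

namespace mongo {

// Hypothetical example class, not part of the patch.
class Counter {
public:
    void increment() {
        // Was: stdx::lock_guard<stdx::mutex> lk(_mutex);
        stdx::lock_guard<Latch> lk(_mutex);
        ++_value;
    }

    long long get() const {
        stdx::lock_guard<Latch> lk(_mutex);
        return _value;
    }

private:
    // Was: mutable stdx::mutex _mutex;
    // The string names the latch so diagnostics can identify it by label.
    mutable Mutex _mutex = MONGO_MAKE_LATCH("Counter::_mutex");
    long long _value = 0;
};

}  // namespace mongo

As in shard_registry.h and shard_remote.h below, mutexes guarding data read from const methods stay mutable.
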
Diffstat (limited to 'src/mongo/s/client')
-rw-r--r--  src/mongo/s/client/rs_local_client.cpp   |  4
-rw-r--r--  src/mongo/s/client/rs_local_client.h     |  4
-rw-r--r--  src/mongo/s/client/shard_connection.cpp  |  8
-rw-r--r--  src/mongo/s/client/shard_registry.cpp    | 30
-rw-r--r--  src/mongo/s/client/shard_registry.h      |  8
-rw-r--r--  src/mongo/s/client/shard_remote.cpp      |  4
-rw-r--r--  src/mongo/s/client/shard_remote.h        |  5
-rw-r--r--  src/mongo/s/client/version_manager.cpp   | 10
8 files changed, 37 insertions, 36 deletions
diff --git a/src/mongo/s/client/rs_local_client.cpp b/src/mongo/s/client/rs_local_client.cpp
index fdb386a3bcf..e1ec4917f8f 100644
--- a/src/mongo/s/client/rs_local_client.cpp
+++ b/src/mongo/s/client/rs_local_client.cpp
@@ -56,7 +56,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (lastOpTimeFromClient >= _lastOpTime) {
// It's always possible for lastOpTimeFromClient to be less than _lastOpTime if another
// thread started and completed a write through this ShardLocal (updating _lastOpTime)
@@ -66,7 +66,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
}
repl::OpTime RSLocalClient::_getLastOpTime() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTime;
}
diff --git a/src/mongo/s/client/rs_local_client.h b/src/mongo/s/client/rs_local_client.h
index 7bba5c7eaa0..7dabd19d454 100644
--- a/src/mongo/s/client/rs_local_client.h
+++ b/src/mongo/s/client/rs_local_client.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ private:
repl::OpTime _getLastOpTime();
// Guards _lastOpTime below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RSLocalClient::_mutex");
// Stores the optime that was generated by the last operation to perform a write that was run
// through _runCommand. Used in _exhaustiveFindOnConfig for waiting for that optime to be
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index 2cc44c44a1f..1e56594331d 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -63,19 +63,19 @@ class ClientConnections;
class ActiveClientConnections {
public:
void add(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.erase(cc);
}
void appendInfo(BSONObjBuilder* b) const;
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ActiveClientConnections::_mutex");
std::set<const ClientConnections*> _clientConnections;
} activeClientConnections;
@@ -331,7 +331,7 @@ void ActiveClientConnections::appendInfo(BSONObjBuilder* b) const {
BSONArrayBuilder arr(64 * 1024);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto* conn : _clientConnections) {
BSONObjBuilder bb(arr.subobjStart());
conn->appendInfo(bb);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 2c92c46e33e..95b3a726eff 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -50,13 +50,13 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -202,12 +202,12 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
newConnString.type() == ConnectionString::CUSTOM); // For dbtests
// to prevent update config shard connection string during init
- stdx::unique_lock<stdx::mutex> lock(_reloadMutex);
+ stdx::unique_lock<Latch> lock(_reloadMutex);
_data.rebuildShardIfExists(newConnString, _shardFactory.get());
}
void ShardRegistry::init() {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
auto configShard =
_shardFactory->createShard(ShardRegistry::kConfigServerShardId, _initConfigServerCS);
@@ -282,12 +282,12 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
bool ShardRegistry::isUp() const {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
return _isUp;
}
bool ShardRegistry::reload(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
// Another thread is already in the process of reloading so no need to do duplicate work.
@@ -444,7 +444,7 @@ ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shar
}
void ShardRegistryData::swap(ShardRegistryData& other) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lookup.swap(other._lookup);
_rsLookup.swap(other._rsLookup);
_hostLookup.swap(other._hostLookup);
@@ -452,29 +452,29 @@ void ShardRegistryData::swap(ShardRegistryData& other) {
}
shared_ptr<Shard> ShardRegistryData::getConfigShard() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configShard;
}
void ShardRegistryData::addConfigShard(std::shared_ptr<Shard> shard) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configShard = shard;
_addShard(lk, shard, true);
}
shared_ptr<Shard> ShardRegistryData::findByRSName(const string& name) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto i = _rsLookup.find(name);
return (i != _rsLookup.end()) ? i->second : nullptr;
}
shared_ptr<Shard> ShardRegistryData::findByHostAndPort(const HostAndPort& hostAndPort) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return mapFindWithDefault(_hostLookup, hostAndPort);
}
shared_ptr<Shard> ShardRegistryData::findByShardId(const ShardId& shardId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _findByShardId(lk, shardId);
}
@@ -487,7 +487,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
// Need to copy, then sort by shardId.
std::vector<std::pair<ShardId, std::string>> shards;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
shards.reserve(_lookup.size());
for (auto&& shard : _lookup) {
shards.emplace_back(shard.first, shard.second->getConnString().toString());
@@ -503,7 +503,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
}
void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
const auto& s = i->second;
if (s->getId().toString() == "config") {
@@ -514,7 +514,7 @@ void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
}
void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
invariant(i->second);
auto res = diff.find(i->second->getId());
@@ -526,7 +526,7 @@ void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
void ShardRegistryData::rebuildShardIfExists(const ConnectionString& newConnString,
ShardFactory* factory) {
- stdx::unique_lock<stdx::mutex> updateConnStringLock(_mutex);
+ stdx::unique_lock<Latch> updateConnStringLock(_mutex);
auto it = _rsLookup.find(newConnString.getSetName());
if (it == _rsLookup.end()) {
return;
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index a5917c64413..22a08cef941 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -36,9 +36,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -114,7 +114,7 @@ private:
void _rebuildShard(WithLock, ConnectionString const& newConnString, ShardFactory* factory);
// Protects the lookup maps below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardRegistryData::_mutex");
using ShardMap = stdx::unordered_map<ShardId, std::shared_ptr<Shard>, ShardId::Hasher>;
@@ -302,7 +302,7 @@ private:
ShardRegistryData _data;
// Protects the _reloadState and _initConfigServerCS during startup.
- mutable stdx::mutex _reloadMutex;
+ mutable Mutex _reloadMutex = MONGO_MAKE_LATCH("ShardRegistry::_reloadMutex");
stdx::condition_variable _inReloadCV;
enum class ReloadState {
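
ShardRegistry keeps its stdx::condition_variable (_inReloadCV above) and now pairs it with stdx::unique_lock<Latch>, as in reload(). The following is a hedged sketch of that pairing, not code from the patch: it assumes "mongo/platform/condition_variable.h" supplies a condition variable that can wait on a unique_lock over the new Mutex. ReloadGate and its members are hypothetical.

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

// Hypothetical example, not part of the patch.
class ReloadGate {
public:
    void finishReload() {
        stdx::lock_guard<Latch> lk(_mutex);
        _reloading = false;
        _cv.notify_all();
    }

    void waitForReload() {
        // unique_lock (not lock_guard) so the condition variable can
        // release and reacquire the latch while waiting.
        stdx::unique_lock<Latch> lk(_mutex);
        _cv.wait(lk, [&] { return !_reloading; });
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("ReloadGate::_mutex");
    stdx::condition_variable _cv;
    bool _reloading = true;
};

}  // namespace mongo
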
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 25f2e1959c0..4e684d8fab9 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -143,7 +143,7 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
}
void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
// A secondary may return a lastCommittedOpTime less than the latest seen so far.
if (lastCommittedOpTime > _lastCommittedOpTime) {
@@ -152,7 +152,7 @@ void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
}
LogicalTime ShardRemote::getLastCommittedOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
return _lastCommittedOpTime;
}
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 3b19fd8ab0f..cf1b7b2d3f8 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -34,7 +34,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -133,7 +133,8 @@ private:
/**
* Protects _lastCommittedOpTime.
*/
- mutable stdx::mutex _lastCommittedOpTimeMutex;
+ mutable Mutex _lastCommittedOpTimeMutex =
+ MONGO_MAKE_LATCH("ShardRemote::_lastCommittedOpTimeMutex");
/**
* Logical time representing the latest opTime timestamp known to be in this shard's majority
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 1ffed487065..27907950ac4 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -63,14 +63,14 @@ namespace {
class ConnectionShardStatus {
public:
bool hasAnySequenceSet(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
}
bool getSequence(DBClientBase* conn, const string& ns, unsigned long long* sequence) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -85,18 +85,18 @@ public:
}
void setSequence(DBClientBase* conn, const string& ns, const unsigned long long& s) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map[conn->getConnectionId()][ns] = s;
}
void reset(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map.erase(conn->getConnectionId());
}
private:
// protects _map
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionShardStatus::_mutex");
// a map from a connection into ChunkManager's sequence number for each namespace
typedef map<unsigned long long, map<string, unsigned long long>> SequenceMap;