author     Ben Caimano <ben.caimano@mongodb.com>  2019-09-17 23:22:19 +0000
committer  evergreen <evergreen@mongodb.com>  2019-09-17 23:22:19 +0000
commit     bc11369435ca51e2ff6897433d00f6b909f6a25f (patch)
tree       251653ec8285d798b41846e343e7e414e80ff277 /src/mongo/s
parent     45aea2495306dd61fab46bd398735bb6aaf7b53a (diff)
download   mongo-bc11369435ca51e2ff6897433d00f6b909f6a25f.tar.gz
SERVER-42165 Replace uses of stdx::mutex with mongo::Mutex
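
The migration is mechanical: each file drops the include of "mongo/stdx/mutex.h" in favor of "mongo/platform/mutex.h", member mutexes become Mutex instances built with MONGO_MAKE_LATCH (named after the owning class so latch diagnostics can identify them), and lock guards are taken against the Latch alias instead of stdx::mutex. A minimal before/after sketch of the member pattern follows; the WidgetRegistry class and its _state member are hypothetical and used only for illustration.

#include "mongo/platform/mutex.h"  // replaces "mongo/stdx/mutex.h"

namespace mongo {

class WidgetRegistry {  // hypothetical class, for illustration only
public:
    int get() const {
        // Guards now name the Latch alias rather than stdx::mutex.
        stdx::lock_guard<Latch> lk(_mutex);
        return _state;
    }

private:
    // Before: mutable stdx::mutex _mutex;
    // After: a named latch, so lock diagnostics can report "WidgetRegistry::_mutex".
    mutable Mutex _mutex = MONGO_MAKE_LATCH("WidgetRegistry::_mutex");
    int _state = 0;  // guarded by _mutex
};

}  // namespace mongo
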
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/balancer_configuration.cpp | 12
-rw-r--r--  src/mongo/s/balancer_configuration.h | 5
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.cpp | 40
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.h | 4
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager.cpp | 12
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager.h | 6
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager_test.cpp | 46
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h | 4
-rw-r--r--  src/mongo/s/catalog_cache.cpp | 30
-rw-r--r--  src/mongo/s/catalog_cache.h | 4
-rw-r--r--  src/mongo/s/chunk_writes_tracker.cpp | 2
-rw-r--r--  src/mongo/s/chunk_writes_tracker.h | 4
-rw-r--r--  src/mongo/s/client/rs_local_client.cpp | 4
-rw-r--r--  src/mongo/s/client/rs_local_client.h | 4
-rw-r--r--  src/mongo/s/client/shard_connection.cpp | 8
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 30
-rw-r--r--  src/mongo/s/client/shard_registry.h | 8
-rw-r--r--  src/mongo/s/client/shard_remote.cpp | 4
-rw-r--r--  src/mongo/s/client/shard_remote.h | 5
-rw-r--r--  src/mongo/s/client/version_manager.cpp | 10
-rw-r--r--  src/mongo/s/cluster_identity_loader.cpp | 6
-rw-r--r--  src/mongo/s/cluster_identity_loader.h | 6
-rw-r--r--  src/mongo/s/cluster_last_error_info.cpp | 8
-rw-r--r--  src/mongo/s/cluster_last_error_info.h | 6
-rw-r--r--  src/mongo/s/config_server_catalog_cache_loader.cpp | 2
-rw-r--r--  src/mongo/s/config_server_catalog_cache_loader.h | 2
-rw-r--r--  src/mongo/s/grid.cpp | 8
-rw-r--r--  src/mongo/s/grid.h | 4
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp | 26
-rw-r--r--  src/mongo/s/query/async_results_merger.h | 4
-rw-r--r--  src/mongo/s/query/blocking_results_merger_test.cpp | 6
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp | 30
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h | 8
-rw-r--r--  src/mongo/s/query/establish_cursors.h | 2
-rw-r--r--  src/mongo/s/router_transactions_metrics.cpp | 4
-rw-r--r--  src/mongo/s/router_transactions_metrics.h | 2
-rw-r--r--  src/mongo/s/sharding_task_executor.h | 4
-rw-r--r--  src/mongo/s/sharding_task_executor_pool_controller.h | 4
39 files changed, 190 insertions, 188 deletions
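
In the test files listed above, function-local mutexes follow the same migration with an anonymous latch: stdx::mutex becomes auto mutex = MONGO_MAKE_LATCH(), "mongo/stdx/condition_variable.h" is replaced by "mongo/platform/condition_variable.h", and condition variables wait on a stdx::unique_lock<Latch>. A small self-contained sketch of that usage follows; the helper function and its waiting logic are illustrative only, not code taken from the tests.

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"

namespace mongo {

// Hypothetical helper mirroring how the tests below synchronize a mock
// callback with the main test thread.
void exampleWaitForCallback() {
    auto mutex = MONGO_MAKE_LATCH();  // before: stdx::mutex mutex;
    stdx::condition_variable cv;
    bool done = false;

    // Stand-in for the mock catalog callback: lock the latch, record the event, notify.
    stdx::thread worker([&] {
        stdx::lock_guard<Latch> lk(mutex);
        done = true;
        cv.notify_all();
    });

    {
        // The waiting side takes a unique_lock against the Latch alias, as in the tests.
        stdx::unique_lock<Latch> lk(mutex);
        cv.wait(lk, [&] { return done; });
    }
    worker.join();
}

}  // namespace mongo
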
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 9dacc96a258..ef7e27aa6a9 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -96,7 +96,7 @@ BalancerConfiguration::BalancerConfiguration()
BalancerConfiguration::~BalancerConfiguration() = default;
BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getMode();
}
@@ -148,7 +148,7 @@ Status BalancerConfiguration::enableAutoSplit(OperationContext* opCtx, bool enab
}
bool BalancerConfiguration::shouldBalance() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff ||
_balancerSettings.getMode() == BalancerSettingsType::kAutoSplitOnly) {
return false;
@@ -158,7 +158,7 @@ bool BalancerConfiguration::shouldBalance() const {
}
bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff) {
return false;
}
@@ -167,12 +167,12 @@ bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
}
MigrationSecondaryThrottleOptions BalancerConfiguration::getSecondaryThrottle() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getSecondaryThrottle();
}
bool BalancerConfiguration::waitForDelete() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.waitForDelete();
}
@@ -214,7 +214,7 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx)
return settingsObjStatus.getStatus();
}
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
_balancerSettings = std::move(settings);
return Status::OK();
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 7bea190a61e..10b174e43e0 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -34,8 +34,8 @@
#include <cstdint>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -283,7 +283,8 @@ private:
Status _refreshAutoSplitSettings(OperationContext* opCtx);
// The latest read balancer settings and a mutex to protect its swaps
- mutable stdx::mutex _balancerSettingsMutex;
+ mutable Mutex _balancerSettingsMutex =
+ MONGO_MAKE_LATCH("BalancerConfiguration::_balancerSettingsMutex");
BalancerSettingsType _balancerSettings;
// Max chunk size after which a chunk would be considered jumbo and won't be moved. This value
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index 5dae286da5a..1a22526aa14 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -132,7 +132,7 @@ StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* opCtx,
GetPingFunc checkerFunc = noGetPingSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getPingReturnValue;
checkerFunc = _getPingChecker;
}
@@ -146,7 +146,7 @@ Status DistLockCatalogMock::ping(OperationContext* opCtx, StringData processID,
PingFunc checkerFunc = noPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _pingReturnValue;
checkerFunc = _pingChecker;
}
@@ -167,7 +167,7 @@ StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* opCtx,
GrabLockFunc checkerFunc = noGrabLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _grabLockReturnValue;
checkerFunc = _grabLockChecker;
}
@@ -188,7 +188,7 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
OvertakeLockFunc checkerFunc = noOvertakeLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _overtakeLockReturnValue;
checkerFunc = _overtakeLockChecker;
}
@@ -202,7 +202,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessi
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -218,7 +218,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx,
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -234,7 +234,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(
GetServerInfoFunc checkerFunc = noGetServerInfoSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getServerInfoReturnValue;
checkerFunc = _getServerInfoChecker;
}
@@ -249,7 +249,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* opCtx,
GetLockByTSFunc checkerFunc = noGetLockByTSSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByTSReturnValue;
checkerFunc = _getLockByTSChecker;
}
@@ -263,7 +263,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* opCtx
GetLockByNameFunc checkerFunc = noGetLockByNameSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByNameReturnValue;
checkerFunc = _getLockByNameChecker;
}
@@ -277,7 +277,7 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
StopPingFunc checkerFunc = noStopPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _stopPingReturnValue;
checkerFunc = _stopPingChecker;
}
@@ -288,67 +288,67 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
void DistLockCatalogMock::expectGrabLock(DistLockCatalogMock::GrabLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = checkerFunc;
_grabLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectNoGrabLock() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = noGrabLockFuncSet;
_grabLockReturnValue = kLocksTypeBadRetValue;
}
void DistLockCatalogMock::expectUnLock(DistLockCatalogMock::UnlockFunc checkerFunc,
Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_unlockChecker = checkerFunc;
_unlockReturnValue = returnThis;
}
void DistLockCatalogMock::expectPing(DistLockCatalogMock::PingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pingChecker = checkerFunc;
_pingReturnValue = returnThis;
}
void DistLockCatalogMock::expectStopPing(StopPingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stopPingChecker = checkerFunc;
_stopPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByTS(GetLockByTSFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByTSChecker = checkerFunc;
_getLockByTSReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByName(GetLockByNameFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByNameChecker = checkerFunc;
_getLockByNameReturnValue = returnThis;
}
void DistLockCatalogMock::expectOvertakeLock(OvertakeLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_overtakeLockChecker = checkerFunc;
_overtakeLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetPing(GetPingFunc checkerFunc,
StatusWith<LockpingsType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getPingChecker = checkerFunc;
_getPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetServerInfo(GetServerInfoFunc checkerFunc,
StatusWith<DistLockCatalog::ServerInfo> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getServerInfoChecker = checkerFunc;
_getServerInfoReturnValue = returnThis;
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index faae634b09e..1eab733dc85 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -32,10 +32,10 @@
#include <functional>
#include "mongo/base/status_with.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -191,7 +191,7 @@ public:
private:
// Protects all the member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DistLockCatalogMock::_mutex");
GrabLockFunc _grabLockChecker;
StatusWith<LocksType> _grabLockReturnValue;
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 860b464939d..7a422fea7ff 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -94,7 +94,7 @@ void ReplSetDistLockManager::startUp() {
void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isShutDown = true;
_shutDownCV.notify_all();
}
@@ -118,7 +118,7 @@ std::string ReplSetDistLockManager::getProcessID() {
}
bool ReplSetDistLockManager::isShutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShutDown;
}
@@ -147,7 +147,7 @@ void ReplSetDistLockManager::doTask() {
std::deque<std::pair<DistLockHandle, boost::optional<std::string>>> toUnlockBatch;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
toUnlockBatch.swap(_unlockList);
}
@@ -179,7 +179,7 @@ void ReplSetDistLockManager::doTask() {
}
MONGO_IDLE_THREAD_BLOCK;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutDownCV.wait_for(lk, _pingInterval.toSystemDuration(), [this] { return _isShutDown; });
}
}
@@ -222,7 +222,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
const auto& serverInfo = serverInfoStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto pingIter = _pingHistory.find(lockDoc.getName());
if (pingIter == _pingHistory.end()) {
@@ -504,7 +504,7 @@ Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
const boost::optional<std::string>& name) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unlockList.push_back(std::make_pair(lockSessionID, name));
}
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset_dist_lock_manager.h
index 1814bd96677..38176244f33 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.h
@@ -34,12 +34,12 @@
#include <string>
#include "mongo/base/string_data.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/dist_lock_ping_info.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
@@ -132,7 +132,7 @@ private:
const Milliseconds _pingInterval; // (I)
const Milliseconds _lockExpiration; // (I)
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ReplSetDistLockManager::_mutex");
std::unique_ptr<stdx::thread> _execThread; // (S)
// Contains the list of locks queued for unlocking. Cases when unlock operation can
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index eac536aca42..89a420f50d5 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -36,6 +36,8 @@
#include <vector>
#include "mongo/bson/json.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
#include "mongo/s/catalog/replset_dist_lock_manager.h"
@@ -44,8 +46,6 @@
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/system_tick_source.h"
#include "mongo/util/tick_source_mock.h"
@@ -413,7 +413,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectGetLockByName([](StringData) {},
{ErrorCodes::LockNotFound, "not found!"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
OID unlockSessionIDPassed;
int unlockCallCount = 0;
@@ -421,7 +421,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -435,7 +435,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -558,7 +558,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
},
{ErrorCodes::ExceededMemoryLimit, "bad remote server"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
int unlockCallCount = 0;
OID unlockSessionIDPassed;
@@ -566,7 +566,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -580,7 +580,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -609,13 +609,13 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
* 3. Check that correct process is being pinged.
*/
TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable ping3TimesCV;
std::vector<std::string> processIDList;
getMockCatalog()->expectPing(
[&testMutex, &ping3TimesCV, &processIDList](StringData processIDArg, Date_t ping) {
- stdx::lock_guard<stdx::mutex> lk(testMutex);
+ stdx::lock_guard<Latch> lk(testMutex);
processIDList.push_back(processIDArg.toString());
if (processIDList.size() >= 3) {
@@ -626,7 +626,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (processIDList.size() < 3) {
didTimeout = ping3TimesCV.wait_for(lk, kJoinTimeout.toSystemDuration()) ==
stdx::cv_status::timeout;
@@ -659,7 +659,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
* 4. Check that lockSessionID used on all unlock is the same as the one used to grab lock.
*/
TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
std::vector<OID> lockSessionIDPassed;
@@ -667,13 +667,13 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
if (lockSessionIDPassed.size() >= kUnlockErrorCount) {
getMockCatalog()->expectUnLock(
[&lockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
unlockCV.notify_all();
},
@@ -705,7 +705,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (lockSessionIDPassed.size() < kUnlockErrorCount) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -739,7 +739,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
* 5. Check that the lock session id used when lock was called matches with unlock.
*/
TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
std::vector<OID> lockSessionIDPassed;
std::map<OID, int> unlockIDMap; // id -> count
@@ -761,14 +761,14 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
getMockCatalog()->expectUnLock(
[this, &unlockIDMap, &testMutex, &unlockCV, &mapEntriesGreaterThanTwo](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockIDMap[lockSessionID]++;
// Wait until we see at least 2 unique lockSessionID more than twice.
if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
getMockCatalog()->expectUnLock(
[&testMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockCV.notify_all();
},
Status::OK());
@@ -792,7 +792,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
StringData processId,
Date_t time,
StringData why) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
lockSessionIDPassed.push_back(lockSessionIDArg);
},
retLockDoc);
@@ -804,7 +804,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (unlockIDMap.size() < 2 || !mapEntriesGreaterThanTwo(unlockIDMap)) {
didTimeout =
@@ -1739,11 +1739,11 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
OID unlockSessionIDPassed;
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
getMockCatalog()->expectUnLock(
[&unlockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
},
@@ -1756,7 +1756,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (!unlockSessionIDPassed.isSet()) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index a73251cd98c..ddb160f3ceb 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -144,7 +144,7 @@ ShardingCatalogClientImpl::ShardingCatalogClientImpl(
ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
void ShardingCatalogClientImpl::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -156,7 +156,7 @@ void ShardingCatalogClientImpl::startup() {
void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 3d1446e4805..269305ae323 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -31,9 +31,9 @@
#include "mongo/client/connection_string.h"
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -216,7 +216,7 @@ private:
// (R) Read only, can only be written during initialization.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogClientImpl::_mutex");
// Distributed lock manager singleton.
std::unique_ptr<DistLockManager> _distLockManager; // (R)
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 4a3d3c00849..918e2cb1f2e 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -127,7 +127,7 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
"SERVER-37398.");
try {
while (true) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto& dbEntry = _databases[dbName];
if (!dbEntry) {
@@ -217,7 +217,7 @@ CatalogCache::RefreshResult CatalogCache::_getCollectionRoutingInfoAt(
const auto dbInfo = std::move(swDbInfo.getValue());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -312,7 +312,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutin
void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const DatabaseVersion& databaseVersion) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
@@ -345,7 +345,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
// We received StaleShardVersion for a collection we thought was sharded. Either a migration
// occurred to or from a shard we contacted, or the collection was dropped.
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto nss = ccri._cm->getns();
const auto itDb = _collectionsByDb.find(nss.db());
@@ -369,7 +369,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
ChunkVersion targetCollectionVersion) const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
@@ -397,7 +397,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
}
void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
// The database was dropped.
@@ -407,7 +407,7 @@ void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
}
void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -421,7 +421,7 @@ void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
}
void CatalogCache::invalidateEntriesThatReferenceShard(const ShardId& shardId) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
log() << "Starting to invalidate databases and collections with data on shard: " << shardId;
@@ -461,7 +461,7 @@ void CatalogCache::invalidateEntriesThatReferenceShard(const ShardId& shardId) {
}
void CatalogCache::purgeCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -472,13 +472,13 @@ void CatalogCache::purgeCollection(const NamespaceString& nss) {
}
void CatalogCache::purgeDatabase(StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.erase(dbName);
_collectionsByDb.erase(dbName);
}
void CatalogCache::purgeAllDatabases() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.clear();
_collectionsByDb.clear();
}
@@ -489,7 +489,7 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
size_t numDatabaseEntries;
size_t numCollectionEntries{0};
{
- stdx::lock_guard<stdx::mutex> ul(_mutex);
+ stdx::lock_guard<Latch> ul(_mutex);
numDatabaseEntries = _databases.size();
for (const auto& entry : _collectionsByDb) {
numCollectionEntries += entry.second.size();
@@ -546,7 +546,7 @@ void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const auto refreshCallback = [ this, dbName, dbEntry, onRefreshFailed, onRefreshCompleted ](
OperationContext * opCtx, StatusWith<DatabaseType> swDbt) noexcept {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!swDbt.isOK()) {
onRefreshFailed(lg, swDbt.getStatus());
@@ -657,12 +657,12 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
onRefreshCompleted(Status::OK(), newRoutingInfo.get());
} catch (const DBException& ex) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
onRefreshFailed(lg, ex.toStatus());
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
collEntry->needsRefresh = false;
collEntry->refreshCompletionNotification->set(Status::OK());
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index a087f02802c..fe2f1f60400 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -33,12 +33,12 @@
#include "mongo/base/string_data.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/database_version_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -394,7 +394,7 @@ private:
using CollectionsByDbMap = StringMap<CollectionInfoMap>;
// Mutex to serialize access to the structures below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CatalogCache::_mutex");
// Map from DB name to the info for that database
DatabaseInfoMap _databases;
diff --git a/src/mongo/s/chunk_writes_tracker.cpp b/src/mongo/s/chunk_writes_tracker.cpp
index 807c526532d..abb20746650 100644
--- a/src/mongo/s/chunk_writes_tracker.cpp
+++ b/src/mongo/s/chunk_writes_tracker.cpp
@@ -52,7 +52,7 @@ bool ChunkWritesTracker::shouldSplit(uint64_t maxChunkSize) {
}
bool ChunkWritesTracker::acquireSplitLock() {
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
if (!_isLockedForSplitting) {
_isLockedForSplitting = true;
diff --git a/src/mongo/s/chunk_writes_tracker.h b/src/mongo/s/chunk_writes_tracker.h
index 141879375c4..85309c5b205 100644
--- a/src/mongo/s/chunk_writes_tracker.h
+++ b/src/mongo/s/chunk_writes_tracker.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -89,7 +89,7 @@ private:
/**
* Protects _splitState when starting a split.
*/
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("ChunkWritesTracker::_mtx");
/**
* Whether or not a current split is in progress for this chunk.
diff --git a/src/mongo/s/client/rs_local_client.cpp b/src/mongo/s/client/rs_local_client.cpp
index fdb386a3bcf..e1ec4917f8f 100644
--- a/src/mongo/s/client/rs_local_client.cpp
+++ b/src/mongo/s/client/rs_local_client.cpp
@@ -56,7 +56,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (lastOpTimeFromClient >= _lastOpTime) {
// It's always possible for lastOpTimeFromClient to be less than _lastOpTime if another
// thread started and completed a write through this ShardLocal (updating _lastOpTime)
@@ -66,7 +66,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
}
repl::OpTime RSLocalClient::_getLastOpTime() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTime;
}
diff --git a/src/mongo/s/client/rs_local_client.h b/src/mongo/s/client/rs_local_client.h
index 7bba5c7eaa0..7dabd19d454 100644
--- a/src/mongo/s/client/rs_local_client.h
+++ b/src/mongo/s/client/rs_local_client.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ private:
repl::OpTime _getLastOpTime();
// Guards _lastOpTime below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RSLocalClient::_mutex");
// Stores the optime that was generated by the last operation to perform a write that was run
// through _runCommand. Used in _exhaustiveFindOnConfig for waiting for that optime to be
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index 2cc44c44a1f..1e56594331d 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -63,19 +63,19 @@ class ClientConnections;
class ActiveClientConnections {
public:
void add(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.erase(cc);
}
void appendInfo(BSONObjBuilder* b) const;
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ActiveClientConnections::_mutex");
std::set<const ClientConnections*> _clientConnections;
} activeClientConnections;
@@ -331,7 +331,7 @@ void ActiveClientConnections::appendInfo(BSONObjBuilder* b) const {
BSONArrayBuilder arr(64 * 1024);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto* conn : _clientConnections) {
BSONObjBuilder bb(arr.subobjStart());
conn->appendInfo(bb);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 2c92c46e33e..95b3a726eff 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -50,13 +50,13 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -202,12 +202,12 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
newConnString.type() == ConnectionString::CUSTOM); // For dbtests
// to prevent update config shard connection string during init
- stdx::unique_lock<stdx::mutex> lock(_reloadMutex);
+ stdx::unique_lock<Latch> lock(_reloadMutex);
_data.rebuildShardIfExists(newConnString, _shardFactory.get());
}
void ShardRegistry::init() {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
auto configShard =
_shardFactory->createShard(ShardRegistry::kConfigServerShardId, _initConfigServerCS);
@@ -282,12 +282,12 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
bool ShardRegistry::isUp() const {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
return _isUp;
}
bool ShardRegistry::reload(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
// Another thread is already in the process of reloading so no need to do duplicate work.
@@ -444,7 +444,7 @@ ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shar
}
void ShardRegistryData::swap(ShardRegistryData& other) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lookup.swap(other._lookup);
_rsLookup.swap(other._rsLookup);
_hostLookup.swap(other._hostLookup);
@@ -452,29 +452,29 @@ void ShardRegistryData::swap(ShardRegistryData& other) {
}
shared_ptr<Shard> ShardRegistryData::getConfigShard() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configShard;
}
void ShardRegistryData::addConfigShard(std::shared_ptr<Shard> shard) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configShard = shard;
_addShard(lk, shard, true);
}
shared_ptr<Shard> ShardRegistryData::findByRSName(const string& name) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto i = _rsLookup.find(name);
return (i != _rsLookup.end()) ? i->second : nullptr;
}
shared_ptr<Shard> ShardRegistryData::findByHostAndPort(const HostAndPort& hostAndPort) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return mapFindWithDefault(_hostLookup, hostAndPort);
}
shared_ptr<Shard> ShardRegistryData::findByShardId(const ShardId& shardId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _findByShardId(lk, shardId);
}
@@ -487,7 +487,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
// Need to copy, then sort by shardId.
std::vector<std::pair<ShardId, std::string>> shards;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
shards.reserve(_lookup.size());
for (auto&& shard : _lookup) {
shards.emplace_back(shard.first, shard.second->getConnString().toString());
@@ -503,7 +503,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
}
void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
const auto& s = i->second;
if (s->getId().toString() == "config") {
@@ -514,7 +514,7 @@ void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
}
void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
invariant(i->second);
auto res = diff.find(i->second->getId());
@@ -526,7 +526,7 @@ void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
void ShardRegistryData::rebuildShardIfExists(const ConnectionString& newConnString,
ShardFactory* factory) {
- stdx::unique_lock<stdx::mutex> updateConnStringLock(_mutex);
+ stdx::unique_lock<Latch> updateConnStringLock(_mutex);
auto it = _rsLookup.find(newConnString.getSetName());
if (it == _rsLookup.end()) {
return;
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index a5917c64413..22a08cef941 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -36,9 +36,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -114,7 +114,7 @@ private:
void _rebuildShard(WithLock, ConnectionString const& newConnString, ShardFactory* factory);
// Protects the lookup maps below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardRegistryData::_mutex");
using ShardMap = stdx::unordered_map<ShardId, std::shared_ptr<Shard>, ShardId::Hasher>;
@@ -302,7 +302,7 @@ private:
ShardRegistryData _data;
// Protects the _reloadState and _initConfigServerCS during startup.
- mutable stdx::mutex _reloadMutex;
+ mutable Mutex _reloadMutex = MONGO_MAKE_LATCH("ShardRegistry::_reloadMutex");
stdx::condition_variable _inReloadCV;
enum class ReloadState {
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 25f2e1959c0..4e684d8fab9 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -143,7 +143,7 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
}
void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
// A secondary may return a lastCommittedOpTime less than the latest seen so far.
if (lastCommittedOpTime > _lastCommittedOpTime) {
@@ -152,7 +152,7 @@ void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
}
LogicalTime ShardRemote::getLastCommittedOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
return _lastCommittedOpTime;
}
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 3b19fd8ab0f..cf1b7b2d3f8 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -34,7 +34,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -133,7 +133,8 @@ private:
/**
* Protects _lastCommittedOpTime.
*/
- mutable stdx::mutex _lastCommittedOpTimeMutex;
+ mutable Mutex _lastCommittedOpTimeMutex =
+ MONGO_MAKE_LATCH("ShardRemote::_lastCommittedOpTimeMutex");
/**
* Logical time representing the latest opTime timestamp known to be in this shard's majority
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 1ffed487065..27907950ac4 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -63,14 +63,14 @@ namespace {
class ConnectionShardStatus {
public:
bool hasAnySequenceSet(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
}
bool getSequence(DBClientBase* conn, const string& ns, unsigned long long* sequence) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -85,18 +85,18 @@ public:
}
void setSequence(DBClientBase* conn, const string& ns, const unsigned long long& s) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map[conn->getConnectionId()][ns] = s;
}
void reset(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map.erase(conn->getConnectionId());
}
private:
// protects _map
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionShardStatus::_mutex");
// a map from a connection into ChunkManager's sequence number for each namespace
typedef map<unsigned long long, map<string, unsigned long long>> SequenceMap;
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index ed61976820a..1962272ca5d 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -56,14 +56,14 @@ ClusterIdentityLoader* ClusterIdentityLoader::get(OperationContext* operationCon
}
OID ClusterIdentityLoader::getClusterId() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_initializationState == InitializationState::kInitialized && _lastLoadResult.isOK());
return _lastLoadResult.getValue();
}
Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_initializationState == InitializationState::kInitialized) {
invariant(_lastLoadResult.isOK());
return Status::OK();
@@ -105,7 +105,7 @@ StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
}
void ClusterIdentityLoader::discardCachedClusterId() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_initializationState == InitializationState::kUninitialized) {
return;
diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h
index b5ee563d253..6b6d394f9e1 100644
--- a/src/mongo/s/cluster_identity_loader.h
+++ b/src/mongo/s/cluster_identity_loader.h
@@ -33,8 +33,8 @@
#include "mongo/bson/oid.h"
#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -94,7 +94,7 @@ private:
StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ClusterIdentityLoader::_mutex");
stdx::condition_variable _inReloadCV;
// Used to ensure that only one thread at a time attempts to reload the cluster ID from the
diff --git a/src/mongo/s/cluster_last_error_info.cpp b/src/mongo/s/cluster_last_error_info.cpp
index 4dd79d95fb4..2fe697df461 100644
--- a/src/mongo/s/cluster_last_error_info.cpp
+++ b/src/mongo/s/cluster_last_error_info.cpp
@@ -40,12 +40,12 @@ const Client::Decoration<std::shared_ptr<ClusterLastErrorInfo>> ClusterLastError
Client::declareDecoration<std::shared_ptr<ClusterLastErrorInfo>>();
void ClusterLastErrorInfo::addShardHost(const std::string& shardHost) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->shardHostsWritten.insert(shardHost);
}
void ClusterLastErrorInfo::addHostOpTime(ConnectionString connStr, HostOpTime stat) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->hostOpTimes[connStr] = stat;
}
@@ -56,13 +56,13 @@ void ClusterLastErrorInfo::addHostOpTimes(const HostOpTimeMap& hostOpTimes) {
}
void ClusterLastErrorInfo::newRequest() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::swap(_cur, _prev);
_cur->clear();
}
void ClusterLastErrorInfo::disableForCommand() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
RequestInfo* temp = _cur;
_cur = _prev;
_prev = temp;
diff --git a/src/mongo/s/cluster_last_error_info.h b/src/mongo/s/cluster_last_error_info.h
index 0cc07fa27ac..af13045099d 100644
--- a/src/mongo/s/cluster_last_error_info.h
+++ b/src/mongo/s/cluster_last_error_info.h
@@ -63,7 +63,7 @@ public:
* gets shards used on the previous request
*/
std::set<std::string>* getPrevShardHosts() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return &_prev->shardHostsWritten;
}
@@ -71,7 +71,7 @@ public:
* Gets the shards, hosts, and opTimes the client last wrote to with write commands.
*/
const HostOpTimeMap& getPrevHostOpTimes() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _prev->hostOpTimes;
}
@@ -89,7 +89,7 @@ private:
};
// Protects _infos, _cur, and _prev.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterLastErrorInfo::_mutex");
// We use 2 so we can flip for getLastError type operations.
RequestInfo _infos[2];
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index de8fb1a493f..c4a02e89514 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -160,7 +160,7 @@ void ConfigServerCatalogCacheLoader::onStepUp() {
void ConfigServerCatalogCacheLoader::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_inShutdown) {
return;
}
diff --git a/src/mongo/s/config_server_catalog_cache_loader.h b/src/mongo/s/config_server_catalog_cache_loader.h
index 81c81240100..2da4fb9a8e9 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.h
+++ b/src/mongo/s/config_server_catalog_cache_loader.h
@@ -64,7 +64,7 @@ private:
ThreadPool _threadPool;
// Protects the class state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConfigServerCatalogCacheLoader::_mutex");
// True if shutDown was called.
bool _inShutdown{false};
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 97e2ccef518..bda2bc6e929 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -96,12 +96,12 @@ void Grid::setShardingInitialized() {
}
Grid::CustomConnectionPoolStatsFn Grid::getCustomConnectionPoolStatsFn() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _customConnectionPoolStatsFn;
}
void Grid::setCustomConnectionPoolStatsFn(CustomConnectionPoolStatsFn statsFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_customConnectionPoolStatsFn || !statsFn);
_customConnectionPoolStatsFn = std::move(statsFn);
}
@@ -117,7 +117,7 @@ void Grid::setAllowLocalHost(bool allow) {
repl::OpTime Grid::configOpTime() const {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configOpTime;
}
@@ -141,7 +141,7 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
boost::optional<repl::OpTime> Grid::_advanceConfigOpTime(const repl::OpTime& opTime) {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configOpTime < opTime) {
repl::OpTime prev = _configOpTime;
_configOpTime = opTime;
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 2356b01b029..92b7a761f97 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -33,10 +33,10 @@
#include <memory>
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -192,7 +192,7 @@ private:
AtomicWord<bool> _shardingInitialized{false};
// Protects _configOpTime.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Grid::_mutex");
// Last known highest opTime from the config server that should be used when doing reads.
// This value is updated any time a shard or mongos talks to a config server or a shard.
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index f96f9a635b6..bc89f6aa19a 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -115,12 +115,12 @@ AsyncResultsMerger::AsyncResultsMerger(OperationContext* opCtx,
}
AsyncResultsMerger::~AsyncResultsMerger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_remotesExhausted(lk) || _lifecycleState == kKillComplete);
}
bool AsyncResultsMerger::remotesExhausted() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _remotesExhausted(lk);
}
@@ -135,7 +135,7 @@ bool AsyncResultsMerger::_remotesExhausted(WithLock) const {
}
Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tailableMode != TailableModeEnum::kTailableAndAwaitData) {
return Status(ErrorCodes::BadValue,
@@ -155,12 +155,12 @@ Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
}
bool AsyncResultsMerger::ready() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _ready(lk);
}
void AsyncResultsMerger::detachFromOperationContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_opCtx = nullptr;
// If we were about ready to return a boost::none because a tailable cursor reached the end of
// the batch, that should no longer apply to the next use - when we are reattached to a
@@ -170,13 +170,13 @@ void AsyncResultsMerger::detachFromOperationContext() {
}
void AsyncResultsMerger::reattachToOperationContext(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_opCtx);
_opCtx = opCtx;
}
void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCursors) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Create a new entry in the '_remotes' list for each new shard, and add the first cursor batch
// to its buffer. This ensures the shard's initial high water mark is respected, if it exists.
for (auto&& remote : newCursors) {
@@ -189,7 +189,7 @@ void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCurso
}
BSONObj AsyncResultsMerger::getHighWaterMark() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto minPromisedSortKey = _getMinPromisedSortKey(lk);
if (!minPromisedSortKey.isEmpty() && !_ready(lk)) {
_highWaterMark = minPromisedSortKey;
@@ -272,7 +272,7 @@ bool AsyncResultsMerger::_readyUnsorted(WithLock) {
}
StatusWith<ClusterQueryResult> AsyncResultsMerger::nextReady() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
dassert(_ready(lk));
if (_lifecycleState != kAlive) {
return Status(ErrorCodes::IllegalOperation, "AsyncResultsMerger killed");
@@ -400,7 +400,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
auto callbackStatus =
_executor->scheduleRemoteCommand(request, [this, remoteIndex](auto const& cbData) {
- stdx::lock_guard<stdx::mutex> lk(this->_mutex);
+ stdx::lock_guard<Latch> lk(this->_mutex);
this->_handleBatchResponse(lk, cbData, remoteIndex);
});
@@ -413,7 +413,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
}
Status AsyncResultsMerger::scheduleGetMores() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _scheduleGetMores(lk);
}
@@ -447,7 +447,7 @@ Status AsyncResultsMerger::_scheduleGetMores(WithLock lk) {
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_lifecycleState != kAlive) {
// Can't schedule further network operations if the ARM is being killed.
@@ -704,7 +704,7 @@ void AsyncResultsMerger::_scheduleKillCursors(WithLock, OperationContext* opCtx)
}
executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_killCompleteEvent.isValid()) {
invariant(_lifecycleState != kAlive);
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3cf357dca6b..e0e0f2e94c1 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
#include "mongo/s/query/cluster_query_result.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -451,7 +451,7 @@ private:
AsyncResultsMergerParams _params;
// Must be acquired before accessing any data members (other than _params, which is read-only).
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AsyncResultsMerger::_mutex");
// Data tracking the state of our communication with each of the remote nodes.
std::vector<RemoteCursorData> _remotes;
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 5d07b0e2c75..c99aff31fcf 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -157,13 +157,13 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
future.default_timed_get();
// Used for synchronizing the background thread with this thread.
- stdx::mutex mutex;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ auto mutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> lk(mutex);
// Issue a blocking wait for the next result asynchronously on a different thread.
future = launchAsync([&]() {
// Block until the main thread has responded to the getMore.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
auto next = unittest::assertGet(blockingMerger.next(
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
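The test hunk above shows the local, unnamed form of the new primitive: MONGO_MAKE_LATCH() with no diagnostic name, still locked through stdx::unique_lock<Latch>. Below is a minimal sketch of that synchronization pattern, not part of this patch; it assumes mongo/platform/mutex.h provides the Latch alias and the stdx lock types (as the patched headers rely on), and uses a plain std::thread in place of the fixture's launchAsync helper.

    #include <thread>

    #include "mongo/platform/mutex.h"

    void waitForMainThread() {
        auto mutex = MONGO_MAKE_LATCH();     // anonymous local latch
        stdx::unique_lock<Latch> lk(mutex);  // main thread takes it first

        std::thread worker([&] {
            // Blocks until the main thread releases the latch below.
            stdx::unique_lock<Latch> workerLk(mutex);
            // ... work that must not start before the main thread is done ...
        });

        // ... main-thread work, e.g. responding to a scheduled getMore ...
        lk.unlock();  // let the worker proceed
        worker.join();
    }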
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index d1b39da34b1..03dfd1114f5 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -160,7 +160,7 @@ ClusterCursorManager::~ClusterCursorManager() {
void ClusterCursorManager::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
killAllCursors(opCtx);
@@ -176,7 +176,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
// Read the clock out of the lock.
const auto now = _clockSource->now();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
@@ -239,7 +239,7 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
OperationContext* opCtx,
AuthzCheckFn authChecker,
AuthCheck checkSessionAuth) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
return Status(ErrorCodes::ShutdownInProgress,
@@ -299,7 +299,7 @@ void ClusterCursorManager::checkInCursor(std::unique_ptr<ClusterClientCursor> cu
cursor->detachFromOperationContext();
cursor->setLastUseDate(now);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
invariant(entry);
@@ -324,7 +324,7 @@ Status ClusterCursorManager::checkAuthForKillCursors(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId,
AuthzCheckFn authChecker) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -352,7 +352,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
CursorId cursorId) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -376,7 +376,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
return Status::OK();
}
-void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) {
@@ -390,7 +390,7 @@ void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk
std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContext* opCtx,
Date_t cutoff) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [cutoff](CursorId cursorId, const CursorEntry& entry) -> bool {
bool res = entry.getLifetimeType() == CursorLifetime::Mortal &&
@@ -408,14 +408,14 @@ std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContex
}
void ClusterCursorManager::killAllCursors(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [](CursorId, const CursorEntry&) -> bool { return true; };
killCursorsSatisfying(std::move(lk), opCtx, std::move(pred));
}
std::size_t ClusterCursorManager::killCursorsSatisfying(
- stdx::unique_lock<stdx::mutex> lk,
+ stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred) {
invariant(opCtx);
@@ -471,7 +471,7 @@ std::size_t ClusterCursorManager::killCursorsSatisfying(
}
ClusterCursorManager::Stats ClusterCursorManager::stats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
@@ -504,7 +504,7 @@ ClusterCursorManager::Stats ClusterCursorManager::stats() const {
}
void ClusterCursorManager::appendActiveSessions(LogicalSessionIdSet* lsids) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& nsContainerPair : _namespaceToContainerMap) {
for (const auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
@@ -545,7 +545,7 @@ std::vector<GenericCursor> ClusterCursorManager::getIdleCursors(
const OperationContext* opCtx, MongoProcessInterface::CurrentOpUserMode userMode) const {
std::vector<GenericCursor> cursors;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
AuthorizationSession* ctxAuth = AuthorizationSession::get(opCtx->getClient());
@@ -593,7 +593,7 @@ std::pair<Status, int> ClusterCursorManager::killCursorsWithMatchingSessions(
stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
LogicalSessionId lsid) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
stdx::unordered_set<CursorId> cursorIds;
@@ -618,7 +618,7 @@ stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
boost::optional<NamespaceString> ClusterCursorManager::getNamespaceForCursorId(
CursorId cursorId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _cursorIdPrefixToNamespaceMap.find(extractPrefixFromCursorId(cursorId));
if (it == _cursorIdPrefixToNamespaceMap.end()) {
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index af2ea16581b..a54e2d8bea3 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -38,11 +38,11 @@
#include "mongo/db/kill_sessions.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/session_killer.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/s/query/cluster_client_cursor.h"
#include "mongo/s/query/cluster_client_cursor_guard.h"
#include "mongo/s/query/cluster_client_cursor_params.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/time_support.h"
@@ -406,7 +406,7 @@ private:
/**
* Will detach a cursor, release the lock and then call kill() on it.
*/
- void detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+ void detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId);
@@ -443,7 +443,7 @@ private:
*
* Returns the number of cursors killed.
*/
- std::size_t killCursorsSatisfying(stdx::unique_lock<stdx::mutex> lk,
+ std::size_t killCursorsSatisfying(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred);
@@ -597,7 +597,7 @@ private:
ClockSource* _clockSource;
// Synchronizes access to all private state variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterCursorManager::_mutex");
bool _inShutdown{false};
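detachAndKillCursor() and killCursorsSatisfying() above take the stdx::unique_lock<Latch> by value: the caller hands ownership of the held lock to the callee, which can then drop it before doing blocking work. A minimal sketch of that idiom follows; the names are hypothetical and not from this patch.

    #include <utility>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    Mutex exampleMutex = MONGO_MAKE_LATCH("example::exampleMutex");

    // Owns the lock it receives; releases it before any slow, blocking work.
    void finishAndKill(stdx::unique_lock<Latch> lk) {
        // ... detach the entry from protected state while still locked ...
        lk.unlock();
        // ... issue killCursors over the network without holding the latch ...
    }

    void caller() {
        stdx::unique_lock<Latch> lk(exampleMutex);
        // ... find the entry under the latch ...
        finishAndKill(std::move(lk));  // lock ownership moves into the callee
    }

    }  // namespace mongo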
diff --git a/src/mongo/s/query/establish_cursors.h b/src/mongo/s/query/establish_cursors.h
index 97e72225072..95f6e7ae9d0 100644
--- a/src/mongo/s/query/establish_cursors.h
+++ b/src/mongo/s/query/establish_cursors.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/s/router_transactions_metrics.cpp b/src/mongo/s/router_transactions_metrics.cpp
index ddc8406354f..b3cad2b1edc 100644
--- a/src/mongo/s/router_transactions_metrics.cpp
+++ b/src/mongo/s/router_transactions_metrics.cpp
@@ -223,7 +223,7 @@ void RouterTransactionsMetrics::incrementCommitSuccessful(TransactionRouter::Com
void RouterTransactionsMetrics::incrementAbortCauseMap(std::string abortCause) {
invariant(!abortCause.empty());
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
auto it = _abortCauseMap.find(abortCause);
if (it == _abortCauseMap.end()) {
_abortCauseMap.emplace(std::pair<std::string, std::int64_t>(std::move(abortCause), 1));
@@ -263,7 +263,7 @@ void RouterTransactionsMetrics::updateStats(RouterTransactionsStats* stats) {
BSONObjBuilder bob;
{
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
for (auto const& abortCauseEntry : _abortCauseMap) {
bob.append(abortCauseEntry.first, abortCauseEntry.second);
}
diff --git a/src/mongo/s/router_transactions_metrics.h b/src/mongo/s/router_transactions_metrics.h
index ed496fe394c..5c52a8e20d0 100644
--- a/src/mongo/s/router_transactions_metrics.h
+++ b/src/mongo/s/router_transactions_metrics.h
@@ -147,7 +147,7 @@ private:
CommitStats _recoverWithTokenCommitStats;
// Mutual exclusion for _abortCauseMap
- stdx::mutex _abortCauseMutex;
+ Mutex _abortCauseMutex = MONGO_MAKE_LATCH("RouterTransactionsMetrics::_abortCauseMutex");
// Map tracking the total number of each abort cause for any multi-statement transaction that
// was aborted through this router.
diff --git a/src/mongo/s/sharding_task_executor.h b/src/mongo/s/sharding_task_executor.h
index 0f034d144f1..e370a5425a5 100644
--- a/src/mongo/s/sharding_task_executor.h
+++ b/src/mongo/s/sharding_task_executor.h
@@ -34,8 +34,8 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace executor {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h
index c077578892f..d9b82233974 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.h
+++ b/src/mongo/s/sharding_task_executor_pool_controller.h
@@ -35,7 +35,7 @@
#include "mongo/client/replica_set_change_notifier.h"
#include "mongo/executor/connection_pool.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -194,7 +194,7 @@ private:
ReplicaSetChangeListenerHandle _listener;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingTaskExecutorPoolController::_mutex");
// Entries to _poolDatas are added by addHost() and removed by removeHost()
stdx::unordered_map<PoolId, PoolData> _poolDatas;
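Taken together, every hunk in this diff applies the same mechanical rewrite: a raw stdx::mutex member becomes a Mutex initialized with MONGO_MAKE_LATCH and a "Class::_field" diagnostic name, and the lock guards are re-templated on Latch. A minimal before/after sketch of that rewrite follows; WidgetRegistry and Widget are hypothetical names used only to show the shape of the change, and the headers compile only inside the MongoDB source tree.

    // Before: an unnamed stdx::mutex.
    //
    //     #include "mongo/stdx/mutex.h"
    //     ...
    //     stdx::lock_guard<stdx::mutex> lk(_mutex);
    //     ...
    //     mutable stdx::mutex _mutex;

    // After: a named latch, locked through the same stdx guard types.
    #include <utility>
    #include <vector>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    struct Widget {};  // hypothetical payload

    class WidgetRegistry {
    public:
        void insert(Widget w) {
            stdx::lock_guard<Latch> lk(_mutex);  // guard now templated on Latch
            _widgets.push_back(std::move(w));
        }

    private:
        // The latch carries a diagnostic name, by convention "Class::_field".
        mutable Mutex _mutex = MONGO_MAKE_LATCH("WidgetRegistry::_mutex");
        std::vector<Widget> _widgets;
    };

    }  // namespace mongo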