diff options
Diffstat (limited to 'src/mongo/db/storage')
38 files changed, 214 insertions, 200 deletions
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp index 977ba1d6b52..370ececd244 100644 --- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp +++ b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp @@ -94,7 +94,7 @@ std::unique_ptr<mongo::RecordStore> KVEngine::getRecordStore(OperationContext* o } bool KVEngine::trySwapMaster(StringStore& newMaster, uint64_t version) { - stdx::lock_guard<stdx::mutex> lock(_masterLock); + stdx::lock_guard<Latch> lock(_masterLock); invariant(!newMaster.hasBranch() && !_master.hasBranch()); if (_masterVersion != version) return false; diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/biggie/biggie_kv_engine.h index 97c836b523a..a9a3582cfdd 100644 --- a/src/mongo/db/storage/biggie/biggie_kv_engine.h +++ b/src/mongo/db/storage/biggie/biggie_kv_engine.h @@ -154,7 +154,7 @@ public: * Returns a pair of the current version and copy of tree of the master. */ std::pair<uint64_t, StringStore> getMasterInfo() { - stdx::lock_guard<stdx::mutex> lock(_masterLock); + stdx::lock_guard<Latch> lock(_masterLock); return std::make_pair(_masterVersion, _master); } @@ -170,7 +170,7 @@ private: std::map<std::string, bool> _idents; // TODO : replace with a query to _master. 
std::unique_ptr<VisibilityManager> _visibilityManager; - mutable stdx::mutex _masterLock; + mutable Mutex _masterLock = MONGO_MAKE_LATCH("KVEngine::_masterLock"); StringStore _master; uint64_t _masterVersion = 0; }; diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp index 4c47df9cd7b..8e2aadc7041 100644 --- a/src/mongo/db/storage/biggie/biggie_record_store.cpp +++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp @@ -120,7 +120,7 @@ bool RecordStore::isCapped() const { } void RecordStore::setCappedCallback(CappedCallback* cb) { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); _cappedCallback = cb; } @@ -264,7 +264,7 @@ void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo auto endIt = workingCopy->upper_bound(_postfix); while (recordIt != endIt) { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); if (_cappedCallback) { // Documents are guaranteed to have a RecordId at the end of the KeyString, unlike // unique indexes. @@ -357,11 +357,11 @@ void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* wo auto recordIt = workingCopy->lower_bound(_prefix); // Ensure only one thread at a time can do deletes, otherwise they'll conflict. 
- stdx::lock_guard<stdx::mutex> cappedDeleterLock(_cappedDeleterMutex); + stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex); while (_cappedAndNeedDelete(opCtx, workingCopy)) { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); RecordId rid = RecordId(extractRecordId(recordIt->first)); if (_isOplog && _visibilityManager->isFirstHidden(rid)) { diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h index e8dee66da1c..005d49ee293 100644 --- a/src/mongo/db/storage/biggie/biggie_record_store.h +++ b/src/mongo/db/storage/biggie/biggie_record_store.h @@ -38,7 +38,7 @@ #include "mongo/db/storage/capped_callback.h" #include "mongo/db/storage/record_store.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { namespace biggie { @@ -138,10 +138,11 @@ private: std::string _prefix; std::string _postfix; - mutable stdx::mutex _cappedCallbackMutex; // Guards _cappedCallback + mutable Mutex _cappedCallbackMutex = + MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback CappedCallback* _cappedCallback; - mutable stdx::mutex _cappedDeleterMutex; + mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex"); AtomicWord<long long> _highestRecordId{1}; AtomicWord<long long> _numRecords{0}; diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp index d9921bc6472..94a869727b0 100644 --- a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp +++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp @@ -56,7 +56,7 @@ public: virtual void rollback() { _visibilityManager->dealtWithRecord(_rid); - stdx::lock_guard<stdx::mutex> lk(_rs->_cappedCallbackMutex); + stdx::lock_guard<Latch> lk(_rs->_cappedCallbackMutex); if 
(_rs->_cappedCallback) _rs->_cappedCallback->notifyCappedWaitersIfNeeded(); } @@ -68,7 +68,7 @@ private: }; void VisibilityManager::dealtWithRecord(RecordId rid) { - stdx::lock_guard<stdx::mutex> lock(_stateLock); + stdx::lock_guard<Latch> lock(_stateLock); _uncommittedRecords.erase(rid); _opsBecameVisibleCV.notify_all(); } @@ -76,7 +76,7 @@ void VisibilityManager::dealtWithRecord(RecordId rid) { void VisibilityManager::addUncommittedRecord(OperationContext* opCtx, RecordStore* rs, RecordId rid) { - stdx::lock_guard<stdx::mutex> lock(_stateLock); + stdx::lock_guard<Latch> lock(_stateLock); _uncommittedRecords.insert(rid); opCtx->recoveryUnit()->registerChange(std::make_unique<VisibilityManagerChange>(this, rs, rid)); @@ -85,13 +85,13 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx, } RecordId VisibilityManager::getAllCommittedRecord() { - stdx::lock_guard<stdx::mutex> lock(_stateLock); + stdx::lock_guard<Latch> lock(_stateLock); return _uncommittedRecords.empty() ? 
_highestSeen : RecordId(_uncommittedRecords.begin()->repr() - 1); } bool VisibilityManager::isFirstHidden(RecordId rid) { - stdx::lock_guard<stdx::mutex> lock(_stateLock); + stdx::lock_guard<Latch> lock(_stateLock); if (_uncommittedRecords.empty()) return false; return *_uncommittedRecords.begin() == rid; @@ -100,7 +100,7 @@ bool VisibilityManager::isFirstHidden(RecordId rid) { void VisibilityManager::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) { invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); - stdx::unique_lock<stdx::mutex> lock(_stateLock); + stdx::unique_lock<Latch> lock(_stateLock); const RecordId waitFor = _highestSeen; opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lock, [&] { return _uncommittedRecords.empty() || *_uncommittedRecords.begin() > waitFor; diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.h b/src/mongo/db/storage/biggie/biggie_visibility_manager.h index 387b7edc0d0..8370ba0c990 100644 --- a/src/mongo/db/storage/biggie/biggie_visibility_manager.h +++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.h @@ -31,7 +31,7 @@ #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" -#include "mongo/stdx/condition_variable.h" +#include "mongo/platform/condition_variable.h" #include "mongo/util/concurrency/mutex.h" namespace mongo { @@ -76,7 +76,8 @@ public: void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx); private: - mutable stdx::mutex _stateLock; // Protects the values below. + mutable Mutex _stateLock = + MONGO_MAKE_LATCH("VisibilityManager::_stateLock"); // Protects the values below. RecordId _highestSeen = RecordId(); // Used to wait for all earlier oplog writes to be visible. 
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp index fc88ca957ff..0bc79d049ba 100644 --- a/src/mongo/db/storage/durable_catalog_impl.cpp +++ b/src/mongo/db/storage/durable_catalog_impl.cpp @@ -151,7 +151,7 @@ public: virtual void commit(boost::optional<Timestamp>) {} virtual void rollback() { - stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock); + stdx::lock_guard<Latch> lk(_catalog->_identsLock); _catalog->_idents.erase(_ident); } @@ -166,7 +166,7 @@ public: virtual void commit(boost::optional<Timestamp>) {} virtual void rollback() { - stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock); + stdx::lock_guard<Latch> lk(_catalog->_identsLock); _catalog->_idents[_ident] = _entry; } @@ -471,7 +471,7 @@ void DurableCatalogImpl::init(OperationContext* opCtx) { } std::vector<NamespaceString> DurableCatalogImpl::getAllCollections() const { - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); std::vector<NamespaceString> result; for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) { result.push_back(NamespaceString(it->first)); @@ -487,7 +487,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx, const string ident = _newUniqueIdent(nss, "collection"); - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); Entry& old = _idents[nss.toString()]; if (!old.ident.empty()) { return Status(ErrorCodes::NamespaceExists, "collection already exists"); @@ -517,7 +517,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx, } std::string DurableCatalogImpl::getCollectionIdent(const NamespaceString& nss) const { - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); NSToIdentMap::const_iterator it = _idents.find(nss.toString()); invariant(it != _idents.end()); return it->second.ident; @@ -536,7 +536,7 @@ BSONObj 
DurableCatalogImpl::_findEntry(OperationContext* opCtx, RecordId* out) const { RecordId dl; { - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); NSToIdentMap::const_iterator it = _idents.find(nss.toString()); invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << nss); dl = it->second.storedLoc; @@ -652,7 +652,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx, fassert(28522, status); } - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); const NSToIdentMap::iterator fromIt = _idents.find(fromNss.toString()); invariant(fromIt != _idents.end()); @@ -673,7 +673,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx, Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const NamespaceString& nss) { invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)); - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); const NSToIdentMap::iterator it = _idents.find(nss.toString()); if (it == _idents.end()) { return Status(ErrorCodes::NamespaceNotFound, "collection not found"); @@ -693,7 +693,7 @@ std::vector<std::string> DurableCatalogImpl::getAllIdentsForDB(StringData db) co std::vector<std::string> v; { - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) { NamespaceString ns(it->first); if (ns.db() != db) @@ -761,7 +761,7 @@ StatusWith<std::string> DurableCatalogImpl::newOrphanedIdent(OperationContext* o NamespaceString::kOrphanCollectionPrefix + identNs) .ns(); - stdx::lock_guard<stdx::mutex> lk(_identsLock); + stdx::lock_guard<Latch> lk(_identsLock); Entry& old = _idents[ns]; if (!old.ident.empty()) { return Status(ErrorCodes::NamespaceExists, diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h index 
b7683e9da68..f99ff41da5b 100644 --- a/src/mongo/db/storage/durable_catalog_impl.h +++ b/src/mongo/db/storage/durable_catalog_impl.h @@ -40,7 +40,7 @@ #include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog.h" #include "mongo/db/storage/kv/kv_prefix.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -261,7 +261,7 @@ private: }; typedef std::map<std::string, Entry> NSToIdentMap; NSToIdentMap _idents; - mutable stdx::mutex _identsLock; + mutable Mutex _identsLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_identsLock"); // Manages the feature document that may be present in the DurableCatalogImpl. '_featureTracker' // is guaranteed to be non-null after DurableCatalogImpl::init() is called. diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp index 1f689ddd607..597bc513d20 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp @@ -43,7 +43,7 @@ namespace mongo { RecoveryUnit* EphemeralForTestEngine::newRecoveryUnit() { return new EphemeralForTestRecoveryUnit([this]() { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); JournalListener::Token token = _journalListener->getToken(); _journalListener->onDurable(token); }); @@ -55,14 +55,14 @@ Status EphemeralForTestEngine::createRecordStore(OperationContext* opCtx, const CollectionOptions& options) { // Register the ident in the `_dataMap` (for `getAllIdents`). Remainder of work done in // `getRecordStore`. 
- stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _dataMap[ident] = {}; return Status::OK(); } std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore( OperationContext* opCtx, StringData ns, StringData ident, const CollectionOptions& options) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); if (options.capped) { return std::make_unique<EphemeralForTestRecordStore>( ns, @@ -77,7 +77,7 @@ std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore( std::unique_ptr<RecordStore> EphemeralForTestEngine::makeTemporaryRecordStore( OperationContext* opCtx, StringData ident) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _dataMap[ident] = {}; return std::make_unique<EphemeralForTestRecordStore>(ident, &_dataMap[ident]); } @@ -88,14 +88,14 @@ Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx const IndexDescriptor* desc) { // Register the ident in `_dataMap` (for `getAllIdents`). Remainder of work done in // `getSortedDataInterface`. 
- stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _dataMap[ident] = {}; return Status::OK(); } std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterface( OperationContext* opCtx, StringData ident, const IndexDescriptor* desc) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()), desc->unique(), desc->parentNS(), @@ -105,7 +105,7 @@ std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterf } Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx, StringData ident) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _dataMap.erase(ident); return Status::OK(); } @@ -117,7 +117,7 @@ int64_t EphemeralForTestEngine::getIdentSize(OperationContext* opCtx, StringData std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext* opCtx) const { std::vector<std::string> all; { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) { all.push_back(it->first); } diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h index a083f9f3a4b..b51b285ef2c 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h @@ -33,7 +33,7 @@ #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/string_map.h" namespace mongo { @@ -102,13 +102,12 @@ public: virtual bool hasIdent(OperationContext* opCtx, StringData ident) const { return _dataMap.find(ident) != _dataMap.end(); - ; } std::vector<std::string> getAllIdents(OperationContext* opCtx) 
const; void setJournalListener(JournalListener* jl) final { - stdx::unique_lock<stdx::mutex> lk(_mutex); + stdx::unique_lock<Latch> lk(_mutex); _journalListener = jl; } @@ -127,7 +126,7 @@ public: private: typedef StringMap<std::shared_ptr<void>> DataMap; - mutable stdx::mutex _mutex; + mutable Mutex _mutex = MONGO_MAKE_LATCH("EphemeralForTestEngine::_mutex"); DataMap _dataMap; // All actual data is owned in here // Notified when we write as everything is considered "journalled" since repl depends on it. diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h index 2fdbaaa579e..3bd7ffb0ce5 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h @@ -35,7 +35,7 @@ #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/storage/capped_callback.h" #include "mongo/db/storage/record_store.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/with_lock.h" diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp index ac010f891ae..c6976fd77b2 100644 --- a/src/mongo/db/storage/flow_control.cpp +++ b/src/mongo/db/storage/flow_control.cpp @@ -172,7 +172,7 @@ double FlowControl::_getLocksPerOp() { Sample backOne; std::size_t numSamples; { - stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex); + stdx::lock_guard<Latch> lk(_sampledOpsMutex); numSamples = _sampledOpsApplied.size(); if (numSamples >= 2) { backTwo = _sampledOpsApplied[numSamples - 2]; @@ -399,7 +399,7 @@ std::int64_t FlowControl::_approximateOpsBetween(Timestamp prevTs, Timestamp cur std::int64_t prevApplied = -1; std::int64_t currApplied = -1; - stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex); + stdx::lock_guard<Latch> lk(_sampledOpsMutex); for (auto&& sample : _sampledOpsApplied) { if (prevApplied == -1 && 
prevTs.asULL() <= std::get<0>(sample)) { prevApplied = std::get<1>(sample); @@ -427,7 +427,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) { return; } - stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex); + stdx::lock_guard<Latch> lk(_sampledOpsMutex); _numOpsSinceStartup += opsApplied; if (_numOpsSinceStartup - _lastSample < static_cast<std::size_t>(gFlowControlSamplePeriod.load())) { @@ -469,7 +469,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) { void FlowControl::_trimSamples(const Timestamp trimTo) { int numTrimmed = 0; - stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex); + stdx::lock_guard<Latch> lk(_sampledOpsMutex); // Always leave at least two samples for calculating `locksPerOp`. while (_sampledOpsApplied.size() > 2 && std::get<0>(_sampledOpsApplied.front()) < trimTo.asULL()) { diff --git a/src/mongo/db/storage/flow_control.h b/src/mongo/db/storage/flow_control.h index 64f0d0b1d00..17b465b9d21 100644 --- a/src/mongo/db/storage/flow_control.h +++ b/src/mongo/db/storage/flow_control.h @@ -37,7 +37,7 @@ #include "mongo/db/repl/replication_coordinator_fwd.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -125,7 +125,7 @@ private: // Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers. AtomicWord<std::int64_t> _isLaggedTimeMicros{0}; - mutable stdx::mutex _sampledOpsMutex; + mutable Mutex _sampledOpsMutex = MONGO_MAKE_LATCH("FlowControl::_sampledOpsMutex"); std::deque<Sample> _sampledOpsApplied; // These values are used in the sampling process. 
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp index ef5b441d989..44337fffc49 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp @@ -46,7 +46,7 @@ KVDropPendingIdentReaper::KVDropPendingIdentReaper(KVEngine* engine) : _engine(e void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestamp, const NamespaceString& nss, StringData ident) { - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); const auto equalRange = _dropPendingIdents.equal_range(dropTimestamp); const auto& lowerBound = equalRange.first; const auto& upperBound = equalRange.second; @@ -65,7 +65,7 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam } boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp() const { - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); auto it = _dropPendingIdents.cbegin(); if (it == _dropPendingIdents.cend()) { return boost::none; @@ -74,7 +74,7 @@ boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp() } std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const { - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); std::set<std::string> idents; for (const auto& entry : _dropPendingIdents) { const auto& identInfo = entry.second; @@ -87,7 +87,7 @@ std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const { void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, const Timestamp& ts) { DropPendingIdents toDrop; { - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); for (auto it = _dropPendingIdents.cbegin(); it != _dropPendingIdents.cend() && it->first < ts; ++it) { @@ -125,7 +125,7 @@ void 
KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons { // Entries must be removed AFTER drops are completed, so that getEarliestDropTimestamp() // returns appropriate results. - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); for (const auto& timestampAndIdentInfo : toDrop) { const auto& dropTimestamp = timestampAndIdentInfo.first; // This may return zero if _dropPendingIdents was cleared using clearDropPendingState(). @@ -135,7 +135,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons } void KVDropPendingIdentReaper::clearDropPendingState() { - stdx::lock_guard<stdx::mutex> lock(_mutex); + stdx::lock_guard<Latch> lock(_mutex); _dropPendingIdents.clear(); } diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h index c249d9af0ba..75f13690a3d 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h @@ -38,7 +38,7 @@ #include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" #include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -113,7 +113,7 @@ private: KVEngine* const _engine; // Guards access to member variables below. - mutable stdx::mutex _mutex; + mutable Mutex _mutex = MONGO_MAKE_LATCH("KVDropPendingIdentReaper::_mutex"); // Drop-pending idents. Ordered by drop timestamp. 
DropPendingIdents _dropPendingIdents; diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp index 6b88dc22c3b..1a54a82f6a1 100644 --- a/src/mongo/db/storage/kv/kv_prefix.cpp +++ b/src/mongo/db/storage/kv/kv_prefix.cpp @@ -31,7 +31,7 @@ namespace mongo { int64_t KVPrefix::_nextValue = 0; -stdx::mutex KVPrefix::_nextValueMutex; +Mutex KVPrefix::_nextValueMutex = MONGO_MAKE_LATCH(); const KVPrefix KVPrefix::kNotPrefixed = KVPrefix(-1); std::string KVPrefix::toString() const { @@ -54,7 +54,7 @@ std::string KVPrefix::toString() const { return; } - stdx::lock_guard<stdx::mutex> lk(_nextValueMutex); + stdx::lock_guard<Latch> lk(_nextValueMutex); _nextValue = largestPrefix._value + 1; } @@ -67,7 +67,7 @@ std::string KVPrefix::toString() const { } /* static */ KVPrefix KVPrefix::generateNextPrefix() { - stdx::lock_guard<stdx::mutex> lk(_nextValueMutex); + stdx::lock_guard<Latch> lk(_nextValueMutex); return KVPrefix(_nextValue++); } } // namespace mongo diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h index 6a785dc19db..45a1e891c0e 100644 --- a/src/mongo/db/storage/kv/kv_prefix.h +++ b/src/mongo/db/storage/kv/kv_prefix.h @@ -33,7 +33,7 @@ #include "mongo/bson/util/builder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/storage/storage_options.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -93,7 +93,7 @@ private: explicit KVPrefix(int64_t value) : _value(value) {} int64_t _value; - static stdx::mutex _nextValueMutex; + static Mutex _nextValueMutex; static int64_t _nextValue; }; diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp index 2aae21eafb4..cf3f7d10b70 100644 --- a/src/mongo/db/storage/kv/storage_engine_test.cpp +++ b/src/mongo/db/storage/kv/storage_engine_test.cpp @@ -431,13 +431,13 @@ TEST_F(TimestampKVEngineTest, TimestampListeners) { } TEST_F(TimestampKVEngineTest, 
TimestampMonitorNotifiesListeners) { - stdx::mutex mutex; + auto mutex = MONGO_MAKE_LATCH(); stdx::condition_variable cv; bool changes[4] = {false, false, false, false}; TimestampListener first(checkpoint, [&](Timestamp timestamp) { - stdx::lock_guard<stdx::mutex> lock(mutex); + stdx::lock_guard<Latch> lock(mutex); if (!changes[0]) { changes[0] = true; cv.notify_all(); @@ -445,7 +445,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) { }); TimestampListener second(oldest, [&](Timestamp timestamp) { - stdx::lock_guard<stdx::mutex> lock(mutex); + stdx::lock_guard<Latch> lock(mutex); if (!changes[1]) { changes[1] = true; cv.notify_all(); @@ -453,7 +453,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) { }); TimestampListener third(stable, [&](Timestamp timestamp) { - stdx::lock_guard<stdx::mutex> lock(mutex); + stdx::lock_guard<Latch> lock(mutex); if (!changes[2]) { changes[2] = true; cv.notify_all(); @@ -461,7 +461,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) { }); TimestampListener fourth(stable, [&](Timestamp timestamp) { - stdx::lock_guard<stdx::mutex> lock(mutex); + stdx::lock_guard<Latch> lock(mutex); if (!changes[3]) { changes[3] = true; cv.notify_all(); @@ -474,7 +474,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) { _storageEngine->getTimestampMonitor()->addListener(&fourth); // Wait until all 4 listeners get notified at least once. 
- stdx::unique_lock<stdx::mutex> lk(mutex); + stdx::unique_lock<Latch> lk(mutex); cv.wait(lk, [&] { for (auto const& change : changes) { if (!change) { diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.h b/src/mongo/db/storage/mobile/mobile_kv_engine.h index 3762ccf0878..0e0b3ab17e3 100644 --- a/src/mongo/db/storage/mobile/mobile_kv_engine.h +++ b/src/mongo/db/storage/mobile/mobile_kv_engine.h @@ -35,7 +35,7 @@ #include "mongo/db/storage/kv/kv_engine.h" #include "mongo/db/storage/mobile/mobile_options.h" #include "mongo/db/storage/mobile/mobile_session_pool.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/periodic_runner.h" #include "mongo/util/string_map.h" @@ -124,7 +124,7 @@ public: std::vector<std::string> getAllIdents(OperationContext* opCtx) const override; void setJournalListener(JournalListener* jl) override { - stdx::unique_lock<stdx::mutex> lk(_mutex); + stdx::unique_lock<Latch> lk(_mutex); _journalListener = jl; } @@ -143,7 +143,7 @@ public: private: void maybeVacuum(Client* client, Date_t deadline); - mutable stdx::mutex _mutex; + mutable Mutex _mutex = MONGO_MAKE_LATCH("MobileKVEngine::_mutex"); void _initDBPath(const std::string& path); std::int32_t _setSQLitePragma(const std::string& pragma, sqlite3* session); diff --git a/src/mongo/db/storage/mobile/mobile_record_store.cpp b/src/mongo/db/storage/mobile/mobile_record_store.cpp index 7543fcb1617..f60142d95fe 100644 --- a/src/mongo/db/storage/mobile/mobile_record_store.cpp +++ b/src/mongo/db/storage/mobile/mobile_record_store.cpp @@ -233,7 +233,7 @@ void MobileRecordStore::_initDataSizeIfNeeded_inlock(OperationContext* opCtx) co } long long MobileRecordStore::dataSize(OperationContext* opCtx) const { - stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex); + stdx::lock_guard<Latch> lock(_dataSizeMutex); _initDataSizeIfNeeded_inlock(opCtx); return _dataSize; } @@ -255,7 +255,7 @@ void MobileRecordStore::_initNumRecsIfNeeded_inlock(OperationContext* opCtx) 
con } long long MobileRecordStore::numRecords(OperationContext* opCtx) const { - stdx::lock_guard<stdx::mutex> lock(_numRecsMutex); + stdx::lock_guard<Latch> lock(_numRecsMutex); _initNumRecsIfNeeded_inlock(opCtx); return _numRecs; } @@ -420,7 +420,7 @@ public: void commit(boost::optional<Timestamp>) override {} void rollback() override { - stdx::lock_guard<stdx::mutex> lock(_rs->_numRecsMutex); + stdx::lock_guard<Latch> lock(_rs->_numRecsMutex); _rs->_numRecs -= _diff; } @@ -430,7 +430,7 @@ private: }; void MobileRecordStore::_changeNumRecs(OperationContext* opCtx, int64_t diff) { - stdx::lock_guard<stdx::mutex> lock(_numRecsMutex); + stdx::lock_guard<Latch> lock(_numRecsMutex); opCtx->recoveryUnit()->registerChange(std::make_unique<NumRecsChange>(this, diff)); _initNumRecsIfNeeded_inlock(opCtx); _numRecs += diff; @@ -441,7 +441,7 @@ bool MobileRecordStore::_resetNumRecsIfNeeded(OperationContext* opCtx, int64_t n int64_t currNumRecs = numRecords(opCtx); if (currNumRecs != newNumRecs) { wasReset = true; - stdx::lock_guard<stdx::mutex> lock(_numRecsMutex); + stdx::lock_guard<Latch> lock(_numRecsMutex); _numRecs = newNumRecs; } return wasReset; @@ -457,7 +457,7 @@ public: void commit(boost::optional<Timestamp>) override {} void rollback() override { - stdx::lock_guard<stdx::mutex> lock(_rs->_dataSizeMutex); + stdx::lock_guard<Latch> lock(_rs->_dataSizeMutex); _rs->_dataSize -= _diff; } @@ -467,7 +467,7 @@ private: }; void MobileRecordStore::_changeDataSize(OperationContext* opCtx, int64_t diff) { - stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex); + stdx::lock_guard<Latch> lock(_dataSizeMutex); opCtx->recoveryUnit()->registerChange(std::make_unique<DataSizeChange>(this, diff)); _initDataSizeIfNeeded_inlock(opCtx); _dataSize += diff; @@ -479,7 +479,7 @@ bool MobileRecordStore::_resetDataSizeIfNeeded(OperationContext* opCtx, int64_t if (currDataSize != _dataSize) { wasReset = true; - stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex); + stdx::lock_guard<Latch> 
lock(_dataSizeMutex); _dataSize = newDataSize; } return wasReset; diff --git a/src/mongo/db/storage/mobile/mobile_record_store.h b/src/mongo/db/storage/mobile/mobile_record_store.h index b08c14c9e44..d9457edd985 100644 --- a/src/mongo/db/storage/mobile/mobile_record_store.h +++ b/src/mongo/db/storage/mobile/mobile_record_store.h @@ -167,7 +167,7 @@ private: bool _resetNumRecsIfNeeded(OperationContext* opCtx, int64_t newNumRecs); mutable int64_t _numRecs; - mutable stdx::mutex _numRecsMutex; + mutable Mutex _numRecsMutex = MONGO_MAKE_LATCH("MobileRecordStore::_numRecsMutex"); mutable bool _isNumRecsInitialized = false; /** @@ -188,7 +188,7 @@ private: bool _resetDataSizeIfNeeded(OperationContext* opCtx, int64_t newDataSize); mutable int64_t _dataSize; - mutable stdx::mutex _dataSizeMutex; + mutable Mutex _dataSizeMutex = MONGO_MAKE_LATCH("MobileRecordStore::_dataSizeMutex"); mutable bool _isDataSizeInitialized = false; }; diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.cpp b/src/mongo/db/storage/mobile/mobile_session_pool.cpp index 179a30cbe5e..a8a211bcc6b 100644 --- a/src/mongo/db/storage/mobile/mobile_session_pool.cpp +++ b/src/mongo/db/storage/mobile/mobile_session_pool.cpp @@ -43,7 +43,7 @@ #include "mongo/db/storage/mobile/mobile_session_pool.h" #include "mongo/db/storage/mobile/mobile_sqlite_statement.h" #include "mongo/db/storage/mobile/mobile_util.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/log.h" namespace mongo { @@ -105,7 +105,7 @@ MobileSessionPool::~MobileSessionPool() { } std::unique_ptr<MobileSession> MobileSessionPool::getSession(OperationContext* opCtx) { - stdx::unique_lock<stdx::mutex> lk(_mutex); + stdx::unique_lock<Latch> lk(_mutex); // We should never be able to get here after _shuttingDown is set, because no new operations // should be allowed to start. 
@@ -141,13 +141,13 @@ void MobileSessionPool::releaseSession(MobileSession* session) { if (!failedDropsQueue.isEmpty()) failedDropsQueue.execAndDequeueOp(session); - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _sessions.push_back(session->getSession()); _releasedSessionNotifier.notify_one(); } void MobileSessionPool::shutDown() { - stdx::unique_lock<stdx::mutex> lk(_mutex); + stdx::unique_lock<Latch> lk(_mutex); _shuttingDown = true; // Retrieve the operation context from the thread's client if the client exists. diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h index 08586e0ece8..031953cdfb3 100644 --- a/src/mongo/db/storage/mobile/mobile_session_pool.h +++ b/src/mongo/db/storage/mobile/mobile_session_pool.h @@ -37,7 +37,7 @@ #include "mongo/db/operation_context.h" #include "mongo/db/storage/mobile/mobile_options.h" #include "mongo/db/storage/mobile/mobile_session.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { class MobileSession; @@ -58,7 +58,7 @@ public: private: AtomicWord<bool> _isEmpty; - stdx::mutex _queueMutex; + Mutex _queueMutex = MONGO_MAKE_LATCH("MobileDelayedOpQueue::_queueMutex"); std::queue<std::string> _opQueryQueue; }; @@ -107,7 +107,7 @@ private: sqlite3* _popSession_inlock(); // This is used to lock the _sessions vector. 
- stdx::mutex _mutex; + Mutex _mutex = MONGO_MAKE_LATCH("MobileSessionPool::_mutex");
lock(_monitorMutex); + stdx::lock_guard<Latch> lock(_monitorMutex); if (std::find(_listeners.begin(), _listeners.end(), listener) == _listeners.end()) { bool listenerNotRegistered = true; invariant(!listenerNotRegistered); diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h index 07f2cf6f42d..64f9774f6e8 100644 --- a/src/mongo/db/storage/storage_engine_impl.h +++ b/src/mongo/db/storage/storage_engine_impl.h @@ -46,7 +46,7 @@ #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_interface.h" #include "mongo/db/storage/temporary_record_store.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/periodic_runner.h" namespace mongo { @@ -290,7 +290,7 @@ public: PeriodicRunner* _periodicRunner; // Protects access to _listeners below. - stdx::mutex _monitorMutex; + Mutex _monitorMutex = MONGO_MAKE_LATCH("TimestampMonitor::_monitorMutex"); std::vector<TimestampListener*> _listeners; // This should remain as the last member variable so that its destructor gets executed first diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp index 556a01a2efb..87a0fa73e0f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp @@ -187,7 +187,7 @@ public: while (!_shuttingDown.load()) { { - stdx::unique_lock<stdx::mutex> lock(_mutex); + stdx::unique_lock<Latch> lock(_mutex); MONGO_IDLE_THREAD_BLOCK; // Check every 10 seconds or sooner in the debug builds _condvar.wait_for(lock, stdx::chrono::seconds(kDebugBuild ? 1 : 10)); @@ -202,7 +202,7 @@ public: void shutdown() { _shuttingDown.store(true); { - stdx::unique_lock<stdx::mutex> lock(_mutex); + stdx::unique_lock<Latch> lock(_mutex); // Wake up the session sweeper thread early, we do not want the shutdown // to wait for us too long. 
_condvar.notify_one(); @@ -214,7 +214,7 @@ private: WiredTigerSessionCache* _sessionCache; AtomicWord<bool> _shuttingDown{false}; - stdx::mutex _mutex; // protects _condvar + Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerSessionSweeper::_mutex"); // protects _condvar // The session sweeper thread idles on this condition variable for a particular time duration // between cleaning up expired sessions. It can be triggered early to expediate shutdown. stdx::condition_variable _condvar; @@ -322,7 +322,7 @@ public: auto opCtx = tc->makeOperationContext(); { - stdx::unique_lock<stdx::mutex> lock(_mutex); + stdx::unique_lock<Latch> lock(_mutex); MONGO_IDLE_THREAD_BLOCK; _condvar.wait_for(lock, stdx::chrono::seconds(static_cast<std::int64_t>( @@ -395,7 +395,7 @@ public: if (oplogNeededForRollback.isOK()) { // Now that the checkpoint is durable, publish the oplog needed to recover // from it. - stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex); + stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex); _oplogNeededForCrashRecovery.store( oplogNeededForRollback.getValue().asULL()); } @@ -440,7 +440,7 @@ public: _hasTriggeredFirstStableCheckpoint = true; log() << "Triggering the first stable checkpoint. Initial Data: " << initialData << " PrevStable: " << prevStable << " CurrStable: " << currStable; - stdx::unique_lock<stdx::mutex> lock(_mutex); + stdx::unique_lock<Latch> lock(_mutex); _condvar.notify_one(); } } @@ -454,14 +454,14 @@ public: * _oplogNeededForCrashRecovery will not change during assignment. 
*/ void assignOplogNeededForCrashRecoveryTo(boost::optional<Timestamp>* timestamp) { - stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex); + stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex); *timestamp = Timestamp(_oplogNeededForCrashRecovery.load()); } void shutdown() { _shuttingDown.store(true); { - stdx::unique_lock<stdx::mutex> lock(_mutex); + stdx::unique_lock<Latch> lock(_mutex); // Wake up the checkpoint thread early, to take a final checkpoint before shutting // down, if one has not coincidentally just been taken. _condvar.notify_one(); @@ -473,7 +473,8 @@ private: WiredTigerKVEngine* _wiredTigerKVEngine; WiredTigerSessionCache* _sessionCache; - stdx::mutex _mutex; // protects _condvar + Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_mutex"); + ; // protects _condvar // The checkpoint thread idles on this condition variable for a particular time duration between // taking checkpoints. It can be triggered early to expediate immediate checkpointing. stdx::condition_variable _condvar; @@ -482,7 +483,8 @@ private: bool _hasTriggeredFirstStableCheckpoint = false; - stdx::mutex _oplogNeededForCrashRecoveryMutex; + Mutex _oplogNeededForCrashRecoveryMutex = + MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_oplogNeededForCrashRecoveryMutex"); AtomicWord<std::uint64_t> _oplogNeededForCrashRecovery; }; @@ -1064,7 +1066,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup( uassert(51034, "Cannot open backup cursor with in-memory mode.", !isEphemeral()); // Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor. 
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex); + stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex); _checkpointThread->assignOplogNeededForCrashRecoveryTo(&_oplogPinnedByBackup); auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; }); @@ -1099,7 +1101,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup( void WiredTigerKVEngine::endNonBlockingBackup(OperationContext* opCtx) { _backupSession.reset(); // Oplog truncation thread can now remove the pinned oplog. - stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex); + stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex); _oplogPinnedByBackup = boost::none; _backupCursor = nullptr; } @@ -1140,7 +1142,7 @@ void WiredTigerKVEngine::syncSizeInfo(bool sync) const { void WiredTigerKVEngine::setOldestActiveTransactionTimestampCallback( StorageEngine::OldestActiveTransactionTimestampCallback callback) { - stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex); + stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex); _oldestActiveTransactionTimestampCallback = std::move(callback); }; @@ -1403,7 +1405,7 @@ Status WiredTigerKVEngine::dropIdent(OperationContext* opCtx, StringData ident) if (ret == EBUSY) { // this is expected, queue it up { - stdx::lock_guard<stdx::mutex> lk(_identToDropMutex); + stdx::lock_guard<Latch> lk(_identToDropMutex); _identToDrop.push_front(uri); } _sessionCache->closeCursorsForQueuedDrops(); @@ -1422,7 +1424,7 @@ std::list<WiredTigerCachedCursor> WiredTigerKVEngine::filterCursorsWithQueuedDro std::list<WiredTigerCachedCursor>* cache) { std::list<WiredTigerCachedCursor> toDrop; - stdx::lock_guard<stdx::mutex> lk(_identToDropMutex); + stdx::lock_guard<Latch> lk(_identToDropMutex); if (_identToDrop.empty()) return toDrop; @@ -1456,7 +1458,7 @@ bool WiredTigerKVEngine::haveDropsQueued() const { _previousCheckedDropsQueued = now; // Don't wait for the mutex: if we can't get 
it, report that no drops are queued. - stdx::unique_lock<stdx::mutex> lk(_identToDropMutex, stdx::defer_lock); + stdx::unique_lock<Latch> lk(_identToDropMutex, stdx::defer_lock); return lk.try_lock() && !_identToDrop.empty(); } @@ -1466,7 +1468,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() { WiredTigerSession session(_conn); { - stdx::lock_guard<stdx::mutex> lk(_identToDropMutex); + stdx::lock_guard<Latch> lk(_identToDropMutex); numInQueue = _identToDrop.size(); } @@ -1479,7 +1481,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() { for (int i = 0; i < numToDelete; i++) { string uri; { - stdx::lock_guard<stdx::mutex> lk(_identToDropMutex); + stdx::lock_guard<Latch> lk(_identToDropMutex); if (_identToDrop.empty()) break; uri = _identToDrop.front(); @@ -1490,7 +1492,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() { LOG(1) << "WT queued drop of " << uri << " res " << ret; if (ret == EBUSY) { - stdx::lock_guard<stdx::mutex> lk(_identToDropMutex); + stdx::lock_guard<Latch> lk(_identToDropMutex); _identToDrop.push_back(uri); } else { invariantWTOK(ret); @@ -1871,7 +1873,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::getOplogNeededForRollback() const { auto stableTimestamp = _stableTimestamp.load(); // Only one thread can set or execute this callback. 
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex); + stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex); boost::optional<Timestamp> oldestActiveTransactionTimestamp; if (_oldestActiveTransactionTimestampCallback) { auto status = _oldestActiveTransactionTimestampCallback(Timestamp(stableTimestamp)); @@ -1904,7 +1906,7 @@ boost::optional<Timestamp> WiredTigerKVEngine::getOplogNeededForCrashRecovery() Timestamp WiredTigerKVEngine::getPinnedOplog() const { { - stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex); + stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex); if (!storageGlobalParams.allowOplogTruncation) { // If oplog truncation is not allowed, then return the min timestamp so that no history // is @@ -1956,14 +1958,14 @@ bool WiredTigerKVEngine::supportsOplogStones() const { void WiredTigerKVEngine::startOplogManager(OperationContext* opCtx, const std::string& uri, WiredTigerRecordStore* oplogRecordStore) { - stdx::lock_guard<stdx::mutex> lock(_oplogManagerMutex); + stdx::lock_guard<Latch> lock(_oplogManagerMutex); if (_oplogManagerCount == 0) _oplogManager->start(opCtx, uri, oplogRecordStore); _oplogManagerCount++; } void WiredTigerKVEngine::haltOplogManager() { - stdx::unique_lock<stdx::mutex> lock(_oplogManagerMutex); + stdx::unique_lock<Latch> lock(_oplogManagerMutex); invariant(_oplogManagerCount > 0); _oplogManagerCount--; if (_oplogManagerCount == 0) { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h index 7b98f9fd388..39a06e1f213 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h @@ -45,7 +45,7 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/stdx/mutex.h" +#include 
"mongo/platform/mutex.h" #include "mongo/util/elapsed_tracker.h" namespace mongo { @@ -409,7 +409,8 @@ private: std::uint64_t _getCheckpointTimestamp() const; - mutable stdx::mutex _oldestActiveTransactionTimestampCallbackMutex; + mutable Mutex _oldestActiveTransactionTimestampCallbackMutex = + MONGO_MAKE_LATCH("::_oldestActiveTransactionTimestampCallbackMutex"); StorageEngine::OldestActiveTransactionTimestampCallback _oldestActiveTransactionTimestampCallback; @@ -420,7 +421,7 @@ private: ClockSource* const _clockSource; // Mutex to protect use of _oplogManagerCount by this instance of KV engine. - mutable stdx::mutex _oplogManagerMutex; + mutable Mutex _oplogManagerMutex = MONGO_MAKE_LATCH("::_oplogManagerMutex"); std::size_t _oplogManagerCount = 0; std::unique_ptr<WiredTigerOplogManager> _oplogManager; @@ -451,15 +452,16 @@ private: std::string _rsOptions; std::string _indexOptions; - mutable stdx::mutex _dropAllQueuesMutex; - mutable stdx::mutex _identToDropMutex; + mutable Mutex _dropAllQueuesMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_dropAllQueuesMutex"); + mutable Mutex _identToDropMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_identToDropMutex"); std::list<std::string> _identToDrop; mutable Date_t _previousCheckedDropsQueued; std::unique_ptr<WiredTigerSession> _backupSession; WT_CURSOR* _backupCursor; - mutable stdx::mutex _oplogPinnedByBackupMutex; + mutable Mutex _oplogPinnedByBackupMutex = + MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogPinnedByBackupMutex"); boost::optional<Timestamp> _oplogPinnedByBackup; Timestamp _recoveryTimestamp; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp index fbf0b9450a3..647fe8de738 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp @@ -37,7 +37,7 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include 
"mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/log.h" #include "mongo/util/scopeguard.h" @@ -74,7 +74,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx, // Need to obtain the mutex before starting the thread, as otherwise it may race ahead // see _shuttingDown as true and quit prematurely. - stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex); _oplogJournalThread = stdx::thread(&WiredTigerOplogManager::_oplogJournalThreadLoop, this, WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(), @@ -86,7 +86,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx, void WiredTigerOplogManager::halt() { { - stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex); invariant(_isRunning); _shuttingDown = true; _isRunning = false; @@ -120,7 +120,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible( // Close transaction before we wait. opCtx->recoveryUnit()->abandonSnapshot(); - stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex); // Prevent any scheduled journal flushes from being delayed and blocking this wait excessively. 
_opsWaitingForVisibility++; @@ -148,7 +148,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible( } void WiredTigerOplogManager::triggerJournalFlush() { - stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex); if (!_opsWaitingForJournal) { _opsWaitingForJournal = true; _opsWaitingForJournalCV.notify_one(); @@ -174,7 +174,7 @@ void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* ses // waitUntilDurable() call requiring an opCtx parameter. opCtx->swapLockState(std::make_unique<LockerImpl>()); - stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex); { MONGO_IDLE_THREAD_BLOCK; _opsWaitingForJournalCV.wait(lk, @@ -251,7 +251,7 @@ std::uint64_t WiredTigerOplogManager::getOplogReadTimestamp() const { } void WiredTigerOplogManager::setOplogReadTimestamp(Timestamp ts) { - stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex); _setOplogReadTimestamp(lk, ts.asULL()); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h index 9a82985fc28..09258c657f2 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h @@ -30,8 +30,8 @@ #pragma once #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/condition_variable.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/thread.h" #include "mongo/util/concurrency/with_lock.h" @@ -60,7 +60,7 @@ public: void halt(); bool isRunning() { - stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex); + stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex); return _isRunning && !_shuttingDown; } @@ -89,7 +89,8 @@ private: void 
_setOplogReadTimestamp(WithLock, uint64_t newTimestamp); stdx::thread _oplogJournalThread; - mutable stdx::mutex _oplogVisibilityStateMutex; + mutable Mutex _oplogVisibilityStateMutex = + MONGO_MAKE_LATCH("WiredTigerOplogManager::_oplogVisibilityStateMutex"); mutable stdx::condition_variable _opsWaitingForJournalCV; // Signaled to trigger a journal flush. mutable stdx::condition_variable diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index f0a12735423..0c4c1956e51 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -147,7 +147,7 @@ public: _oplogStones->_currentRecords.store(0); _oplogStones->_currentBytes.store(0); - stdx::lock_guard<stdx::mutex> lk(_oplogStones->_mutex); + stdx::lock_guard<Latch> lk(_oplogStones->_mutex); _oplogStones->_stones.clear(); } @@ -159,7 +159,7 @@ private: WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs) : _rs(rs) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); invariant(rs->isCapped()); invariant(rs->cappedMaxSize() > 0); @@ -178,13 +178,13 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTi } bool WiredTigerRecordStore::OplogStones::isDead() { - stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex); + stdx::lock_guard<Latch> lk(_oplogReclaimMutex); return _isDead; } void WiredTigerRecordStore::OplogStones::kill() { { - stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex); + stdx::lock_guard<Latch> lk(_oplogReclaimMutex); _isDead = true; } _oplogReclaimCv.notify_one(); @@ -192,11 +192,11 @@ void WiredTigerRecordStore::OplogStones::kill() { void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() { // Wait until kill() is called or there are too many oplog stones. 
- stdx::unique_lock<stdx::mutex> lock(_oplogReclaimMutex); + stdx::unique_lock<Latch> lock(_oplogReclaimMutex); while (!_isDead) { { MONGO_IDLE_THREAD_BLOCK; - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); if (hasExcessStones_inlock()) { // There are now excess oplog stones. However, there it may be necessary to keep // additional oplog. @@ -219,7 +219,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() { boost::optional<WiredTigerRecordStore::OplogStones::Stone> WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); if (!hasExcessStones_inlock()) { return {}; @@ -229,12 +229,12 @@ WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const { } void WiredTigerRecordStore::OplogStones::popOldestStone() { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); _stones.pop_front(); } void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRecord) { - stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::try_to_lock); + stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock); if (!lk) { // Someone else is either already creating a new stone or popping the oldest one. In the // latter case, we let the next insert trigger the new stone's creation. 
@@ -275,7 +275,7 @@ void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* o void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter( int64_t recordsRemoved, int64_t bytesRemoved, RecordId firstRemovedId) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); int64_t numStonesToRemove = 0; int64_t recordsInStonesToRemove = 0; @@ -305,7 +305,7 @@ void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter( void WiredTigerRecordStore::OplogStones::setMinBytesPerStone(int64_t size) { invariant(size > 0); - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); // Only allow changing the minimum bytes per stone if no data has been inserted. invariant(_stones.size() == 0 && _currentRecords.load() == 0); @@ -457,7 +457,7 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() { } void WiredTigerRecordStore::OplogStones::adjust(int64_t maxSize) { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); const unsigned long long kMinStonesToKeep = 10ULL; const unsigned long long kMaxStonesToKeep = 100ULL; @@ -699,7 +699,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine, WiredTigerRecordStore::~WiredTigerRecordStore() { { - stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex); + stdx::lock_guard<Latch> lk(_cappedCallbackMutex); _shuttingDown = true; } @@ -784,7 +784,7 @@ const char* WiredTigerRecordStore::name() const { } bool WiredTigerRecordStore::inShutdown() const { - stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex); + stdx::lock_guard<Latch> lk(_cappedCallbackMutex); return _shuttingDown; } @@ -1060,7 +1060,7 @@ int64_t WiredTigerRecordStore::_cappedDeleteAsNeeded_inlock(OperationContext* op ++docsRemoved; sizeSaved += old_value.size; - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); if 
(_shuttingDown) break; @@ -1332,12 +1332,12 @@ bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const { } bool WiredTigerRecordStore::haveCappedWaiters() { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); return _cappedCallback && _cappedCallback->haveCappedWaiters(); } void WiredTigerRecordStore::notifyCappedWaitersIfNeeded() { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); // This wakes up cursors blocking in await_data. if (_cappedCallback) { _cappedCallback->notifyCappedWaitersIfNeeded(); @@ -1743,7 +1743,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx, // Compute the number and associated sizes of the records to delete. { - stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex); + stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex); do { if (_cappedCallback) { uassertStatusOK( diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h index 693987af2a6..044d57339d7 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h @@ -43,8 +43,8 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/condition_variable.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/thread.h" #include "mongo/util/fail_point_service.h" @@ -212,7 +212,7 @@ public: Status updateCappedSize(OperationContext* opCtx, long long cappedSize) final; void setCappedCallback(CappedCallback* cb) { - stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex); + 
stdx::lock_guard<Latch> lk(_cappedCallbackMutex); _cappedCallback = cb; } @@ -343,9 +343,12 @@ private: RecordId _cappedFirstRecord; AtomicWord<long long> _cappedSleep; AtomicWord<long long> _cappedSleepMS; + + // guards _cappedCallback and _shuttingDown + mutable Mutex _cappedCallbackMutex = + MONGO_MAKE_LATCH("WiredTigerRecordStore::_cappedCallbackMutex"); CappedCallback* _cappedCallback; bool _shuttingDown; - mutable stdx::mutex _cappedCallbackMutex; // guards _cappedCallback and _shuttingDown // See comment in ::cappedDeleteAsNeeded int _cappedDeleteCheckCount; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h index f6e9371c894..f88334ea85b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h @@ -33,8 +33,8 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/condition_variable.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -99,7 +99,7 @@ public: // size_t numStones() const { - stdx::lock_guard<stdx::mutex> lk(_mutex); + stdx::lock_guard<Latch> lk(_mutex); return _stones.size(); } @@ -129,7 +129,7 @@ private: WiredTigerRecordStore* _rs; - stdx::mutex _oplogReclaimMutex; + Mutex _oplogReclaimMutex; stdx::condition_variable _oplogReclaimCv; // True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the "local" @@ -143,7 +143,8 @@ private: AtomicWord<long long> _currentRecords; // Number of records in the stone being filled. AtomicWord<long long> _currentBytes; // Number of bytes in the stone being filled. - mutable stdx::mutex _mutex; // Protects against concurrent access to the deque of oplog stones. 
+ // Protects against concurrent access to the deque of oplog stones. + mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogStones::_mutex"); std::deque<OplogStones::Stone> _stones; // front = oldest, back = newest. }; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp index 104a5caa151..897b72eb762 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp @@ -266,7 +266,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, UniqueWiredTigerSession session = getSession(); WT_SESSION* s = session->getSession(); { - stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex); + stdx::unique_lock<Latch> lk(_journalListenerMutex); JournalListener::Token token = _journalListener->getToken(); auto config = stableCheckpoint ? "use_timestamp=true" : "use_timestamp=false"; auto checkpointLock = _engine->getCheckpointLock(opCtx); @@ -280,7 +280,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, uint32_t start = _lastSyncTime.load(); // Do the remainder in a critical section that ensures only a single thread at a time // will attempt to synchronize. - stdx::unique_lock<stdx::mutex> lk(_lastSyncMutex); + stdx::unique_lock<Latch> lk(_lastSyncMutex); uint32_t current = _lastSyncTime.loadRelaxed(); // synchronized with writes through mutex if (current != start) { // Someone else synced already since we read lastSyncTime, so we're done! @@ -292,7 +292,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, // This gets the token (OpTime) from the last write, before flushing (either the journal, or a // checkpoint), and then reports that token (OpTime) as a durable write. 
- stdx::unique_lock<stdx::mutex> jlk(_journalListenerMutex); + stdx::unique_lock<Latch> jlk(_journalListenerMutex); JournalListener::Token token = _journalListener->getToken(); // Initialize on first use. @@ -316,7 +316,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(OperationContext* opCtx, std::uint64_t lastCount) { invariant(opCtx); - stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex); + stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex); if (lastCount == _prepareCommitOrAbortCounter.loadRelaxed()) { opCtx->waitForConditionOrInterrupt(_prepareCommittedOrAbortedCond, lk, [&] { return _prepareCommitOrAbortCounter.loadRelaxed() > lastCount; @@ -325,14 +325,14 @@ void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(Operatio } void WiredTigerSessionCache::notifyPreparedUnitOfWorkHasCommittedOrAborted() { - stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex); + stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex); _prepareCommitOrAbortCounter.fetchAndAdd(1); _prepareCommittedOrAbortedCond.notify_all(); } void WiredTigerSessionCache::closeAllCursors(const std::string& uri) { - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) { (*i)->closeAllCursors(uri); } @@ -342,14 +342,14 @@ void WiredTigerSessionCache::closeCursorsForQueuedDrops() { // Increment the cursor epoch so that all cursors from this epoch are closed. 
_cursorEpoch.fetchAndAdd(1); - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) { (*i)->closeCursorsForQueuedDrops(_engine); } } size_t WiredTigerSessionCache::getIdleSessionsCount() { - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); return _sessions.size(); } @@ -361,7 +361,7 @@ void WiredTigerSessionCache::closeExpiredIdleSessions(int64_t idleTimeMillis) { auto cutoffTime = _clockSource->now() - Milliseconds(idleTimeMillis); { - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); // Discard all sessions that became idle before the cutoff time for (auto it = _sessions.begin(); it != _sessions.end();) { auto session = *it; @@ -381,7 +381,7 @@ void WiredTigerSessionCache::closeAll() { SessionCache swap; { - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); _epoch.fetchAndAdd(1); _sessions.swap(swap); } @@ -401,7 +401,7 @@ UniqueWiredTigerSession WiredTigerSessionCache::getSession() { invariant(!(_shuttingDown.loadRelaxed() & kShuttingDownMask)); { - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); if (!_sessions.empty()) { // Get the most recently used session so that if we discard sessions, we're // discarding older ones @@ -468,7 +468,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) { session->setIdleExpireTime(_clockSource->now()); if (session->_getEpoch() == currentEpoch) { // check outside of lock to reduce contention - stdx::lock_guard<stdx::mutex> lock(_cacheLock); + stdx::lock_guard<Latch> lock(_cacheLock); if (session->_getEpoch() == _epoch.load()) { // recheck inside the lock for correctness returnedToCache = true; _sessions.push_back(session); @@ -485,7 +485,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) { void 
WiredTigerSessionCache::setJournalListener(JournalListener* jl) { - stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex); + stdx::unique_lock<Latch> lk(_journalListenerMutex); _journalListener = jl; } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h index 72b55e311ed..9a94f175cdc 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h @@ -37,7 +37,7 @@ #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/spin_lock.h" namespace mongo { @@ -323,7 +323,7 @@ private: AtomicWord<unsigned> _shuttingDown; static const uint32_t kShuttingDownMask = 1 << 31; - stdx::mutex _cacheLock; + Mutex _cacheLock = MONGO_MAKE_LATCH("WiredTigerSessionCache::_cacheLock"); typedef std::vector<WiredTigerSession*> SessionCache; SessionCache _sessions; @@ -335,15 +335,16 @@ private: // Counter and critical section mutex for waitUntilDurable AtomicWord<unsigned> _lastSyncTime; - stdx::mutex _lastSyncMutex; + Mutex _lastSyncMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_lastSyncMutex"); // Mutex and cond var for waiting on prepare commit or abort. - stdx::mutex _prepareCommittedOrAbortedMutex; + Mutex _prepareCommittedOrAbortedMutex = + MONGO_MAKE_LATCH("WiredTigerSessionCache::_prepareCommittedOrAbortedMutex"); stdx::condition_variable _prepareCommittedOrAbortedCond; AtomicWord<std::uint64_t> _prepareCommitOrAbortCounter{0}; // Protects _journalListener. - stdx::mutex _journalListenerMutex; + Mutex _journalListenerMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_journalListenerMutex"); // Notified when we commit to the journal. 
JournalListener* _journalListener = &NoOpJournalListener::instance; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp index 56c8161d134..76ddde766e0 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp @@ -64,7 +64,7 @@ WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn, } WiredTigerSizeStorer::~WiredTigerSizeStorer() { - stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex); + stdx::lock_guard<Latch> cursorLock(_cursorMutex); _cursor->close(_cursor); } @@ -74,7 +74,7 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI return; // Ordering is important: as the entry may be flushed concurrently, set the dirty flag last. - stdx::lock_guard<stdx::mutex> lk(_bufferMutex); + stdx::lock_guard<Latch> lk(_bufferMutex); auto& entry = _buffer[uri]; // During rollback it is possible to get a new SizeInfo. In that case clear the dirty flag, // so the SizeInfo can be destructed without triggering the dirty check invariant. @@ -90,13 +90,13 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(StringData uri) const { { // Check if we can satisfy the read from the buffer. - stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex); + stdx::lock_guard<Latch> bufferLock(_bufferMutex); Buffer::const_iterator it = _buffer.find(uri); if (it != _buffer.end()) return it->second; } - stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex); + stdx::lock_guard<Latch> cursorLock(_cursorMutex); // Intentionally ignoring return value. 
ON_BLOCK_EXIT([&] { _cursor->reset(_cursor); }); @@ -125,7 +125,7 @@ std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(Strin void WiredTigerSizeStorer::flush(bool syncToDisk) { Buffer buffer; { - stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex); + stdx::lock_guard<Latch> bufferLock(_bufferMutex); _buffer.swap(buffer); } @@ -133,13 +133,13 @@ void WiredTigerSizeStorer::flush(bool syncToDisk) { return; // Nothing to do. Timer t; - stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex); + stdx::lock_guard<Latch> cursorLock(_cursorMutex); { // On failure, place entries back into the map, unless a newer value already exists. ON_BLOCK_EXIT([this, &buffer]() { this->_cursor->reset(this->_cursor); if (!buffer.empty()) { - stdx::lock_guard<stdx::mutex> bufferLock(this->_bufferMutex); + stdx::lock_guard<Latch> bufferLock(this->_bufferMutex); for (auto& it : buffer) this->_buffer.try_emplace(it.first, it.second); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h index 5db2a4e72bc..79e5725ac81 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h @@ -36,7 +36,7 @@ #include "mongo/base/string_data.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" #include "mongo/util/string_map.h" namespace mongo { @@ -95,12 +95,13 @@ private: const WiredTigerSession _session; const bool _readOnly; // Guards _cursor. Acquire *before* _bufferMutex. 
- mutable stdx::mutex _cursorMutex; + mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("WiredTigerSessionStorer::_cursorMutex"); WT_CURSOR* _cursor; // pointer is const after constructor using Buffer = StringMap<std::shared_ptr<SizeInfo>>; - mutable stdx::mutex _bufferMutex; // Guards _buffer + mutable Mutex _bufferMutex = + MONGO_MAKE_LATCH("WiredTigerSessionStorer::_bufferMutex"); // Guards _buffer Buffer _buffer; }; } // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp index 7216bc1727b..dd7c6ce52b5 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp @@ -42,14 +42,14 @@ namespace mongo { void WiredTigerSnapshotManager::setCommittedSnapshot(const Timestamp& timestamp) { - stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex); + stdx::lock_guard<Latch> lock(_committedSnapshotMutex); invariant(!_committedSnapshot || *_committedSnapshot <= timestamp); _committedSnapshot = timestamp; } void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) { - stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex); + stdx::lock_guard<Latch> lock(_localSnapshotMutex); if (timestamp.isNull()) _localSnapshot = boost::none; else @@ -57,12 +57,12 @@ void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) { } boost::optional<Timestamp> WiredTigerSnapshotManager::getLocalSnapshot() { - stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex); + stdx::lock_guard<Latch> lock(_localSnapshotMutex); return _localSnapshot; } void WiredTigerSnapshotManager::dropAllSnapshots() { - stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex); + stdx::lock_guard<Latch> lock(_committedSnapshotMutex); _committedSnapshot = boost::none; } @@ -71,7 +71,7 @@ boost::optional<Timestamp> WiredTigerSnapshotManager::getMinSnapshotForNextCommi return boost::none; } - 
stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex); + stdx::lock_guard<Latch> lock(_committedSnapshotMutex); return _committedSnapshot; } @@ -81,7 +81,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnCommittedSnapshot( RoundUpPreparedTimestamps roundUpPreparedTimestamps) const { WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps); - stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex); + stdx::lock_guard<Latch> lock(_committedSnapshotMutex); uassert(ErrorCodes::ReadConcernMajorityNotAvailableYet, "Committed view disappeared while running operation", _committedSnapshot); @@ -99,7 +99,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnLocalSnapshot( RoundUpPreparedTimestamps roundUpPreparedTimestamps) const { WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps); - stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex); + stdx::lock_guard<Latch> lock(_localSnapshotMutex); invariant(_localSnapshot); LOG(3) << "begin_transaction on local snapshot " << _localSnapshot.get().toString(); auto status = txnOpen.setReadSnapshot(_localSnapshot.get()); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h index 75c9777a502..1726a7d4c2b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h @@ -35,7 +35,7 @@ #include "mongo/bson/timestamp.h" #include "mongo/db/storage/snapshot_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" -#include "mongo/stdx/mutex.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -91,11 +91,13 @@ public: private: // Snapshot to use for reads at a commit timestamp. - mutable stdx::mutex _committedSnapshotMutex; // Guards _committedSnapshot. + mutable Mutex _committedSnapshotMutex = // Guards _committedSnapshot. 
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_committedSnapshotMutex"); boost::optional<Timestamp> _committedSnapshot; // Snapshot to use for reads at a local stable timestamp. - mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot. + mutable Mutex _localSnapshotMutex = // Guards _localSnapshot. + MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_localSnapshotMutex"); boost::optional<Timestamp> _localSnapshot; }; } // namespace mongo |