-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.h | 1
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp | 6
-rw-r--r--  src/mongo/db/catalog/collection_write_path.cpp | 2
-rw-r--r--  src/mongo/db/catalog/validate_state.h | 5
-rw-r--r--  src/mongo/db/catalog_raii_test.cpp | 4
-rw-r--r--  src/mongo/db/cloner.cpp | 1
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp | 4
-rw-r--r--  src/mongo/db/commands/fle2_compact_cmd.cpp | 2
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 4
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp | 8
-rw-r--r--  src/mongo/db/commands/sleep_command.cpp | 12
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 46
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h | 123
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_bm.cpp | 11
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp | 60
-rw-r--r--  src/mongo/db/curop.cpp | 7
-rw-r--r--  src/mongo/db/cursor_manager.cpp | 6
-rw-r--r--  src/mongo/db/db_raii_test.cpp | 4
-rw-r--r--  src/mongo/db/exec/requires_collection_stage.h | 1
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp | 1
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp | 1
-rw-r--r--  src/mongo/db/index_builds_coordinator.h | 1
-rw-r--r--  src/mongo/db/index_builds_coordinator_mongod.cpp | 8
-rw-r--r--  src/mongo/db/mongod_main.cpp | 1
-rw-r--r--  src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp | 2
-rw-r--r--  src/mongo/db/query/find.cpp | 1
-rw-r--r--  src/mongo/db/repair.cpp | 5
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.h | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.h | 1
-rw-r--r--  src/mongo/db/repl/storage_interface.h | 6
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_service.cpp | 4
-rw-r--r--  src/mongo/db/s/balancer_stats_registry.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.h | 1
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 24
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 12
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 6
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp | 8
-rw-r--r--  src/mongo/db/s/database_sharding_state.cpp | 2
-rw-r--r--  src/mongo/db/s/persistent_task_queue.h | 7
-rw-r--r--  src/mongo/db/s/persistent_task_queue_test.cpp | 12
-rw-r--r--  src/mongo/db/s/range_deletion_util.h | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_util.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_util.h | 3
-rw-r--r--  src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h | 2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 1
-rw-r--r--  src/mongo/db/timeseries/bucket_catalog.h | 2
-rw-r--r--  src/mongo/db/transaction/transaction_participant.h | 3
-rw-r--r--  src/mongo/dbtests/framework.cpp | 5
51 files changed, 167 insertions, 277 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 7c5e690b9d6..afaf6732b17 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -37,7 +37,6 @@
#include "mongo/db/auth/builtin_roles.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user_name.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/tenant_id.h"
#include "mongo/platform/mutex.h"
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 5b50f768e51..537eb28426f 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -788,8 +788,7 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx,
}
if (needsCappedLock()) {
- Lock::ResourceLock heldUntilEndOfWUOW{
- opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X};
+ Lock::ResourceLock heldUntilEndOfWUOW{opCtx, ResourceId(RESOURCE_METADATA, _ns), MODE_X};
}
std::vector<OplogSlot> oplogSlots;
@@ -889,8 +888,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
invariant(newDoc.isOwned());
if (needsCappedLock()) {
- Lock::ResourceLock heldUntilEndOfWUOW{
- opCtx->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X};
+ Lock::ResourceLock heldUntilEndOfWUOW{opCtx, ResourceId(RESOURCE_METADATA, _ns), MODE_X};
}
SnapshotId sid = opCtx->recoveryUnit()->getSnapshotId();
diff --git a/src/mongo/db/catalog/collection_write_path.cpp b/src/mongo/db/catalog/collection_write_path.cpp
index bcbe13ea7d5..69664b71635 100644
--- a/src/mongo/db/catalog/collection_write_path.cpp
+++ b/src/mongo/db/catalog/collection_write_path.cpp
@@ -99,7 +99,7 @@ Status insertDocumentsImpl(OperationContext* opCtx,
// increasing cluster key natively guarantee preservation of the insertion order, and don't
// need serialisation. We allow concurrent inserts for clustered capped collections.
Lock::ResourceLock heldUntilEndOfWUOW{
- opCtx->lockState(), ResourceId(RESOURCE_METADATA, nss.ns()), MODE_X};
+ opCtx, ResourceId(RESOURCE_METADATA, nss.ns()), MODE_X};
}
std::vector<Record> records;
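For context: both collection_impl.cpp and collection_write_path.cpp guard writes to non-clustered capped collections with the same per-namespace RESOURCE_METADATA lock, now constructed directly from the OperationContext. A minimal sketch of the pattern (the surrounding WriteUnitOfWork and the nss variable are assumed from the caller):

    // Sketch: serialise capped-collection writes. Because the lock is taken inside a
    // WriteUnitOfWork, two-phase locking keeps it held until the WUOW commits or aborts,
    // as the variable name suggests.
    if (needsCappedLock()) {
        Lock::ResourceLock heldUntilEndOfWUOW{
            opCtx, ResourceId(RESOURCE_METADATA, nss.ns()), MODE_X};
    }
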
diff --git a/src/mongo/db/catalog/validate_state.h b/src/mongo/db/catalog/validate_state.h
index e28161a22a3..a186839e839 100644
--- a/src/mongo/db/catalog/validate_state.h
+++ b/src/mongo/db/catalog/validate_state.h
@@ -33,7 +33,6 @@
#include "mongo/db/catalog/collection_validation.h"
#include "mongo/db/catalog/throttle_cursor.h"
#include "mongo/db/catalog_raii.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
#include "mongo/db/storage/record_store.h"
@@ -41,10 +40,6 @@
#include "mongo/util/uuid.h"
namespace mongo {
-
-class Database;
-class IndexCatalogEntry;
-
namespace CollectionValidation {
/**
diff --git a/src/mongo/db/catalog_raii_test.cpp b/src/mongo/db/catalog_raii_test.cpp
index 5067cb69380..645aa903837 100644
--- a/src/mongo/db/catalog_raii_test.cpp
+++ b/src/mongo/db/catalog_raii_test.cpp
@@ -30,7 +30,6 @@
#include "mongo/db/catalog/database_holder_mock.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/db/storage/recovery_unit_noop.h"
@@ -404,8 +403,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeCompatibleWithDatabaseEx
}
TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeCompatibleWithRSTLExclusiveLock) {
- Lock::ResourceLock rstl(
- client1.second->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(client1.second.get(), resourceIdReplicationStateTransitionLock, MODE_X);
ASSERT(client1.second->lockState()->isRSTLExclusive());
AutoGetCollectionLockFree coll(
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index d1ec41e1295..4e7af24727b 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -41,7 +41,6 @@
#include "mongo/client/authenticate.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog/collection_write_path.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/cloner_gen.h"
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 928fe95af18..0faf55c09f3 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -528,7 +528,7 @@ void FeatureCompatibilityVersion::fassertInitializedAfterStartup(OperationContex
Lock::ExclusiveLock FeatureCompatibilityVersion::enterFCVChangeRegion(OperationContext* opCtx) {
invariant(!opCtx->lockState()->isLocked());
- return Lock::ExclusiveLock(opCtx->lockState(), fcvDocumentLock);
+ return Lock::ExclusiveLock(opCtx, fcvDocumentLock);
}
void FeatureCompatibilityVersion::advanceLastFCVUpdateTimestamp(Timestamp fcvUpdateTimestamp) {
@@ -595,7 +595,7 @@ FixedFCVRegion::FixedFCVRegion(OperationContext* opCtx)
: _lk([&] {
invariant(!opCtx->lockState()->isLocked());
invariant(!opCtx->lockState()->isRSTLLocked());
- return Lock::SharedLock(opCtx->lockState(), fcvDocumentLock);
+ return Lock::SharedLock(opCtx, fcvDocumentLock);
}()) {}
FixedFCVRegion::~FixedFCVRegion() = default;
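Both FCV helpers above now build their locks straight from the OperationContext, so the acquisition is interruptible without threading the Locker through separately. A rough usage sketch (the surrounding command logic is hypothetical):

    // Sketch: exclusive region while changing FCV, shared region for code that needs a
    // stable FCV. Both are RAII guards over the fcvDocumentLock ResourceMutex.
    {
        Lock::ExclusiveLock fcvChangeRegion =
            FeatureCompatibilityVersion::enterFCVChangeRegion(opCtx);
        // ... update the FCV document ...
    }
    {
        FixedFCVRegion fixedFcv(opCtx);  // holds a Lock::SharedLock on fcvDocumentLock
        // ... logic that must not race with an FCV change ...
    }
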
diff --git a/src/mongo/db/commands/fle2_compact_cmd.cpp b/src/mongo/db/commands/fle2_compact_cmd.cpp
index bd3df18eb8d..7e32e95cf72 100644
--- a/src/mongo/db/commands/fle2_compact_cmd.cpp
+++ b/src/mongo/db/commands/fle2_compact_cmd.cpp
@@ -65,7 +65,7 @@ CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx,
!ShardingState::get(opCtx)->enabled());
// Only allow one instance of compactStructuredEncryptionData to run at a time.
- Lock::ExclusiveLock fleCompactCommandLock(opCtx->lockState(), commandMutex);
+ Lock::ExclusiveLock fleCompactCommandLock(opCtx, commandMutex);
const auto& edcNss = request.getNamespace();
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index c9555a9b1ab..c968bb51882 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -156,7 +156,7 @@ public:
return true;
}
- Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx, commandMutex);
const auto lockCountAtStart = getLockCount();
invariant(lockCountAtStart > 0 || !_lockThread);
@@ -305,7 +305,7 @@ public:
BSONObjBuilder& result) override {
LOGV2(20465, "command: unlock requested");
- Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
+ Lock::ExclusiveLock lk(opCtx, commandMutex);
stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 63c66655e5b..29e7088a522 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -291,7 +291,7 @@ public:
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
// Only allow one instance of setFeatureCompatibilityVersion to run at a time.
- Lock::ExclusiveLock setFCVCommandLock(opCtx->lockState(), commandMutex);
+ Lock::ExclusiveLock setFCVCommandLock(opCtx, commandMutex);
auto request = SetFeatureCompatibilityVersion::parse(
IDLParserContext("setFeatureCompatibilityVersion"), cmdObj);
@@ -520,8 +520,7 @@ private:
// upgrading to the latest FCV and act accordingly.
// - The global IX/X locked operation began prior to the FCV change, is acting on that
// assumption and will finish before upgrade procedures begin right after this.
- Lock::ResourceLock lk(
- opCtx, opCtx->lockState(), resourceIdFeatureCompatibilityVersion, MODE_S);
+ Lock::ResourceLock lk(opCtx, resourceIdFeatureCompatibilityVersion, MODE_S);
}
// This helper function is for any user collections uasserts, creations, or deletions that
@@ -821,8 +820,7 @@ private:
// upgrading to the latest FCV and act accordingly.
// - The global IX/X locked operation began prior to the FCV change, is acting on that
// assumption and will finish before upgrade procedures begin right after this.
- Lock::ResourceLock lk(
- opCtx, opCtx->lockState(), resourceIdFeatureCompatibilityVersion, MODE_S);
+ Lock::ResourceLock lk(opCtx, resourceIdFeatureCompatibilityVersion, MODE_S);
}
uassert(ErrorCodes::Error(549181),
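The comment above describes a barrier: briefly holding resourceIdFeatureCompatibilityVersion in MODE_S waits out any global IX/X operation that began before the FCV change, since Lock::GlobalLock (see the d_concurrency.cpp hunk below) takes that resource in MODE_IX for non-shared global lock modes. A minimal sketch of the barrier as used twice in this command:

    {
        // Sketch: acquire and immediately release the FCV resource in MODE_S.
        // Conflicting MODE_IX holders (pre-existing global IX/X operations) must
        // drain before this constructor returns.
        Lock::ResourceLock lk(opCtx, resourceIdFeatureCompatibilityVersion, MODE_S);
    }
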
diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp
index 0ad5a7c5b99..efee82ad827 100644
--- a/src/mongo/db/commands/sleep_command.cpp
+++ b/src/mongo/db/commands/sleep_command.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/commands.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -38,7 +35,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand
-
namespace mongo {
/* for diagnostic / testing purposes. Enabled via command line. */
@@ -125,19 +121,15 @@ public:
}
void _sleepInPBWM(mongo::OperationContext* opCtx, long long millis) {
- Lock::ResourceLock pbwm(
- opCtx, opCtx->lockState(), resourceIdParallelBatchWriterMode, MODE_X);
+ Lock::ResourceLock pbwm(opCtx, resourceIdParallelBatchWriterMode, MODE_X);
LOGV2(6001604, "PBWM MODE_X lock acquired by sleep command.");
opCtx->sleepFor(Milliseconds(millis));
- pbwm.unlock();
}
void _sleepInRSTL(mongo::OperationContext* opCtx, long long millis) {
- Lock::ResourceLock rstl(
- opCtx, opCtx->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
LOGV2(6001600, "RSTL MODE_X lock acquired by sleep command.");
opCtx->sleepFor(Milliseconds(millis));
- rstl.unlock();
}
CmdSleep() : BasicCommand("sleep") {}
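Note that the explicit pbwm.unlock() / rstl.unlock() calls are gone: ResourceLock now releases itself in its destructor (see the _unlock() change in d_concurrency.cpp below), so the sleep helpers are pure RAII. Sketch of the resulting shape (the helper name is hypothetical):

    void sleepHoldingRSTL(OperationContext* opCtx, long long millis) {
        // Sketch: hold MODE_X on the RSTL for the duration of the sleep; the destructor
        // of 'rstl' releases the lock when the function returns.
        Lock::ResourceLock rstl(opCtx, resourceIdReplicationStateTransitionLock, MODE_X);
        opCtx->sleepFor(Milliseconds(millis));
    }
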
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 04a8c688405..a678e49bbb0 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -135,8 +135,7 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
try {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
- _pbwm.emplace(
- opCtx, opCtx->lockState(), resourceIdParallelBatchWriterMode, MODE_IS, deadline);
+ _pbwm.emplace(opCtx, resourceIdParallelBatchWriterMode, MODE_IS, deadline);
}
ScopeGuard unlockPBWM([this] {
if (_opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) {
@@ -146,7 +145,6 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx,
if (_opCtx->lockState()->shouldConflictWithSetFeatureCompatibilityVersion()) {
_fcvLock.emplace(_opCtx,
- opCtx->lockState(),
resourceIdFeatureCompatibilityVersion,
isSharedLockMode(lockMode) ? MODE_IS : MODE_IX,
deadline);
@@ -247,24 +245,6 @@ Lock::DBLock::~DBLock() {
}
}
-void Lock::DBLock::relockWithMode(LockMode newMode) {
- // 2PL would delay the unlocking
- invariant(!_opCtx->lockState()->inAWriteUnitOfWork());
-
- // Not allowed to change global intent, so check when going from shared to exclusive.
- if (isSharedLockMode(_mode) && !isSharedLockMode(newMode))
- invariant(_opCtx->lockState()->isWriteLocked());
-
- _opCtx->lockState()->unlock(_id);
- _mode = newMode;
-
- // Verify we still have at least the Global resource locked.
- invariant(_opCtx->lockState()->isLocked());
-
- _opCtx->lockState()->lock(_opCtx, _id, _mode);
- _result = LOCK_OK;
-}
-
Lock::CollectionLock::CollectionLock(OperationContext* opCtx,
const NamespaceStringOrUUID& nssOrUUID,
LockMode mode,
@@ -321,19 +301,27 @@ Lock::CollectionLock::~CollectionLock() {
_opCtx->lockState()->unlock(_id);
}
-Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
- : _pbwm(lockState, resourceIdParallelBatchWriterMode, MODE_X),
- _shouldNotConflictBlock(lockState) {}
+Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(OperationContext* opCtx)
+ : _pbwm(opCtx, resourceIdParallelBatchWriterMode, MODE_X),
+ _shouldNotConflictBlock(opCtx->lockState()) {}
-void Lock::ResourceLock::lock(OperationContext* opCtx, LockMode mode, Date_t deadline) {
+void Lock::ResourceLock::_lock(LockMode mode, Date_t deadline) {
invariant(_result == LOCK_INVALID);
- _locker->lock(opCtx, _rid, mode, deadline);
+ if (_opCtx)
+ _opCtx->lockState()->lock(_opCtx, _rid, mode, deadline);
+ else
+ _locker->lock(_rid, mode, deadline);
+
_result = LOCK_OK;
}
-void Lock::ResourceLock::unlock() {
- if (_result == LOCK_OK) {
- _locker->unlock(_rid);
+void Lock::ResourceLock::_unlock() {
+ if (_isLocked()) {
+ if (_opCtx)
+ _opCtx->lockState()->unlock(_rid);
+ else
+ _locker->unlock(_rid);
+
_result = LOCK_INVALID;
}
}
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index ae8501303a7..b999c6103da 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -37,7 +37,6 @@
namespace mongo {
-class StringData;
class NamespaceStringOrUUID;
class Lock {
@@ -50,61 +49,60 @@ public:
* resources other than RESOURCE_GLOBAL, RESOURCE_DATABASE and RESOURCE_COLLECTION.
*/
class ResourceLock {
- ResourceLock(const ResourceLock&) = delete;
- ResourceLock& operator=(const ResourceLock&) = delete;
-
public:
- ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
- : ResourceLock(nullptr, locker, rid, mode) {}
-
ResourceLock(OperationContext* opCtx,
- Locker* locker,
ResourceId rid,
LockMode mode,
Date_t deadline = Date_t::max())
- : _rid(rid), _locker(locker), _result(LOCK_INVALID) {
- lock(opCtx, mode, deadline);
+ : _opCtx(opCtx), _locker(_opCtx->lockState()), _rid(rid) {
+ _lock(mode, deadline);
+ }
+
+ // TODO (SERVER-69461): Do not add any new usages of this constructor and get rid of it
+ ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
+ : _opCtx(nullptr), _locker(locker), _rid(rid) {
+ _lock(mode);
}
ResourceLock(ResourceLock&& otherLock)
- : _rid(otherLock._rid), _locker(otherLock._locker), _result(otherLock._result) {
- // Mark as moved so the destructor doesn't invalidate the newly-
- // constructed lock.
+ : _opCtx(otherLock._opCtx),
+ _locker(otherLock._locker),
+ _rid(std::move(otherLock._rid)),
+ _result(otherLock._result) {
+ otherLock._opCtx = nullptr;
+ otherLock._locker = nullptr;
otherLock._result = LOCK_INVALID;
}
~ResourceLock() {
- if (isLocked()) {
- unlock();
- }
+ _unlock();
}
+ protected:
/**
* Acquires lock on this specified resource in the specified mode.
*
- * If 'opCtx' is provided, it will be used to interrupt a LOCK_WAITING state.
* If 'deadline' is provided, we will wait until 'deadline' for the lock to be granted.
* Otherwise, this parameter defaults to an infinite deadline.
*
* This function may throw an exception if it is interrupted.
*/
- void lock(OperationContext* opCtx, LockMode mode, Date_t deadline = Date_t::max());
-
- void unlock();
-
- bool isLocked() const {
+ void _lock(LockMode mode, Date_t deadline = Date_t::max());
+ void _unlock();
+ bool _isLocked() const {
return _result == LOCK_OK;
}
- private:
- const ResourceId _rid;
- Locker* const _locker;
+ OperationContext* _opCtx;
- LockResult _result;
- };
+ // TODO (SERVER-69461): Get rid of this field when the Locker-only constructor is removed.
+ Locker* _locker;
- class SharedLock;
- class ExclusiveLock;
+ ResourceId _rid;
+
+ private:
+ LockResult _result{LOCK_INVALID};
+ };
/**
* For use as general mutex or readers/writers lock, outside the general multi-granularity
@@ -118,7 +116,14 @@ public:
std::string getName() const {
return getName(_rid);
- };
+ }
+
+ /**
+ * Each instantiation of this class allocates a new ResourceId.
+ */
+ ResourceId getRid() const {
+ return _rid;
+ }
static std::string getName(ResourceId resourceId);
@@ -127,16 +132,6 @@ public:
bool isAtLeastReadLocked(Locker* locker);
private:
- friend class Lock::SharedLock;
- friend class Lock::ExclusiveLock;
-
- /**
- * Each instantiation of this class allocates a new ResourceId.
- */
- ResourceId rid() const {
- return _rid;
- }
-
const ResourceId _rid;
};
@@ -145,23 +140,25 @@ public:
*/
class ExclusiveLock : public ResourceLock {
public:
- ExclusiveLock(Locker* locker, ResourceMutex mutex)
- : ExclusiveLock(nullptr, locker, mutex) {}
+ ExclusiveLock(OperationContext* opCtx, ResourceMutex mutex)
+ : ResourceLock(opCtx, mutex.getRid(), MODE_X) {}
- /**
- * Interruptible lock acquisition.
- */
- ExclusiveLock(OperationContext* opCtx, Locker* locker, ResourceMutex mutex)
- : ResourceLock(opCtx, locker, mutex.rid(), MODE_X) {}
+ ExclusiveLock(Locker* locker, ResourceMutex mutex)
+ : ResourceLock(locker, mutex.getRid(), MODE_X) {}
- using ResourceLock::lock;
+ // Lock/unlock overloads to allow ExclusiveLock to be used with stdx::unique_lock and
+ // stdx::condition_variable_any
- /**
- * Parameterless overload to allow ExclusiveLock to be used with stdx::unique_lock and
- * stdx::condition_variable_any
- */
void lock() {
- lock(nullptr, MODE_X);
+ _lock(MODE_X);
+ }
+
+ void unlock() {
+ _unlock();
+ }
+
+ bool isLocked() const {
+ return _isLocked();
}
};
@@ -172,13 +169,11 @@ public:
*/
class SharedLock : public ResourceLock {
public:
- SharedLock(Locker* locker, ResourceMutex mutex) : SharedLock(nullptr, locker, mutex) {}
+ SharedLock(OperationContext* opCtx, ResourceMutex mutex)
+ : ResourceLock(opCtx, mutex.getRid(), MODE_IS) {}
- /**
- * Interruptible lock acquisition.
- */
- SharedLock(OperationContext* opCtx, Locker* locker, ResourceMutex mutex)
- : ResourceLock(opCtx, locker, mutex.rid(), MODE_IS) {}
+ SharedLock(Locker* locker, ResourceMutex mutex)
+ : ResourceLock(locker, mutex.getRid(), MODE_IS) {}
};
/**
@@ -315,14 +310,6 @@ public:
DBLock(DBLock&&);
~DBLock();
- /**
- * Releases the DBLock and reacquires it with the new mode. The global intent
- * lock is retained (so the database can't disappear). Relocking from MODE_IS or
- * MODE_S to MODE_IX or MODE_X is not allowed to avoid violating the global intent.
- * Use relockWithMode() instead of upgrading to avoid deadlock.
- */
- void relockWithMode(LockMode newMode);
-
bool isLocked() const {
return _result == LOCK_OK;
}
@@ -386,7 +373,7 @@ public:
ParallelBatchWriterMode& operator=(const ParallelBatchWriterMode&) = delete;
public:
- explicit ParallelBatchWriterMode(Locker* lockState);
+ explicit ParallelBatchWriterMode(OperationContext* opCtx);
private:
ResourceLock _pbwm;
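ExclusiveLock keeps public lock()/unlock()/isLocked() members so it can stand in for a mutex in generic code. A minimal sketch of the intended use with stdx::condition_variable_any (the mutex name and the 'ready' predicate are assumptions for illustration):

    // Sketch: ExclusiveLock models BasicLockable via lock()/unlock(), so a
    // condition_variable_any can release and reacquire MODE_X around the wait.
    Lock::ResourceMutex exampleMutex("exampleMutex");
    stdx::condition_variable_any cv;
    bool ready = false;

    Lock::ExclusiveLock lk(opCtx, exampleMutex);   // acquires MODE_X on construction
    cv.wait(lk, [&] { return ready; });            // unlocks/relocks 'lk' while waiting
    // the destructor of 'lk' releases the resource
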
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index ecc944d64fa..e62d3dd4345 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
#include <benchmark/benchmark.h>
#include "mongo/base/init.h"
@@ -37,9 +36,6 @@
#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
-
namespace mongo {
namespace {
@@ -81,11 +77,11 @@ public:
*/
void makeKClientsWithLockers(int k) {
clients.reserve(k);
+
for (int i = 0; i < k; ++i) {
auto client = getGlobalServiceContext()->makeClient(str::stream()
<< "test client for thread " << i);
auto opCtx = client->makeOperationContext();
- locker[i] = std::make_unique<LockerImpl>(opCtx->getServiceContext());
clients.emplace_back(std::move(client), std::move(opCtx));
}
}
@@ -93,7 +89,6 @@ public:
protected:
std::vector<std::pair<ServiceContext::UniqueClient, ServiceContext::UniqueOperationContext>>
clients;
- std::array<std::unique_ptr<LockerImpl>, kMaxPerfThreads> locker;
};
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) {
@@ -116,7 +111,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexShared)(benchmark::State& s
static Lock::ResourceMutex mtx("testMutex");
for (auto keepRunning : state) {
- Lock::SharedLock lk(locker[state.thread_index].get(), mtx);
+ Lock::SharedLock lk(clients[state.thread_index].second.get(), mtx);
}
}
@@ -128,7 +123,7 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexExclusive)(benchmark::State
static Lock::ResourceMutex mtx("testMutex");
for (auto keepRunning : state) {
- Lock::ExclusiveLock lk(locker[state.thread_index].get(), mtx);
+ Lock::ExclusiveLock lk(clients[state.thread_index].second.get(), mtx);
}
}
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 8e11fa7ef62..6899baadb8e 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -28,7 +28,6 @@
*/
#include <functional>
-#include <memory>
#include <string>
#include <vector>
@@ -190,9 +189,7 @@ TEST_F(DConcurrencyTestFixture,
TEST_F(DConcurrencyTestFixture, ResourceMutex) {
Lock::ResourceMutex mtx("testMutex");
auto opCtx = makeOperationContext();
- LockerImpl locker1(opCtx->getServiceContext());
- LockerImpl locker2(opCtx->getServiceContext());
- LockerImpl locker3(opCtx->getServiceContext());
+ auto clients = makeKClientsWithLockers(3);
struct State {
void check(int n) {
@@ -213,53 +210,56 @@ TEST_F(DConcurrencyTestFixture, ResourceMutex) {
} state;
stdx::thread t1([&]() {
+ boost::optional<Lock::SharedLock> lk;
+
// Step 0: Single thread acquires shared lock
state.waitFor(0);
- Lock::SharedLock lk(&locker1, mtx);
- ASSERT(lk.isLocked());
+ lk.emplace(clients[0].second.get(), mtx);
state.finish(0);
// Step 4: Wait for t2 to regain its shared lock
{
state.waitFor(4);
- state.waitFor([&locker2]() { return locker2.getWaitingResource().isValid(); });
+ state.waitFor([locker1 = clients[1].second->lockState()]() {
+ return locker1->getWaitingResource().isValid();
+ });
state.finish(4);
}
// Step 5: After t2 becomes blocked, unlock, yielding the mutex to t3
- lk.unlock();
- ASSERT(!lk.isLocked());
+ lk.reset();
});
stdx::thread t2([&]() {
+ boost::optional<Lock::SharedLock> lk;
+
// Step 1: Two threads acquire shared lock
state.waitFor(1);
- Lock::SharedLock lk(&locker2, mtx);
- ASSERT(lk.isLocked());
+ lk.emplace(clients[1].second.get(), mtx);
state.finish(1);
// Step 2: Wait for t3 to attempt the exclusive lock
- state.waitFor([&locker3]() { return locker3.getWaitingResource().isValid(); });
+ state.waitFor([locker2 = clients[2].second->lockState()]() {
+ return locker2->getWaitingResource().isValid();
+ });
state.finish(2);
// Step 3: Yield shared lock
- lk.unlock();
- ASSERT(!lk.isLocked());
+ lk.reset();
state.finish(3);
// Step 4: Try to regain the shared lock // transfers control to t1
- lk.lock(nullptr, MODE_IS);
+ lk.emplace(clients[1].second.get(), mtx);
- // Step 6: CHeck we actually got back the shared lock
- ASSERT(lk.isLocked());
+ // Step 6: Check we actually got back the shared lock
state.check(6);
});
stdx::thread t3([&]() {
// Step 2: Third thread attempts to acquire exclusive lock
state.waitFor(2);
- Lock::ExclusiveLock lk(&locker3, mtx); // transfers control to t2
// Step 5: Actually get the exclusive lock
- ASSERT(lk.isLocked());
+ Lock::ExclusiveLock lk(clients[2].second.get(),
+ mtx); // transfers control to t2
state.finish(5);
});
t1.join();
@@ -461,7 +461,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockX_Timeout) {
TEST_F(DConcurrencyTestFixture, RSTLmodeX_Timeout) {
auto clients = makeKClientsWithLockers(2);
Lock::ResourceLock rstl(
- clients[0].second.get()->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ clients[0].second.get(), resourceIdReplicationStateTransitionLock, MODE_X);
ASSERT_EQ(
clients[0].second.get()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock),
MODE_X);
@@ -482,7 +482,7 @@ TEST_F(DConcurrencyTestFixture, RSTLmodeX_Timeout) {
TEST_F(DConcurrencyTestFixture, PBWMmodeX_Timeout) {
auto clients = makeKClientsWithLockers(2);
- Lock::ParallelBatchWriterMode pbwm(clients[0].second.get()->lockState());
+ Lock::ParallelBatchWriterMode pbwm(clients[0].second.get());
ASSERT_EQ(clients[0].second.get()->lockState()->getLockMode(resourceIdParallelBatchWriterMode),
MODE_X);
@@ -773,7 +773,7 @@ TEST_F(DConcurrencyTestFixture, GlobalLockWaitIsInterruptibleBlockedOnRSTL) {
// The main thread takes an exclusive lock, causing the spawned thread to wait when it attempts
// to acquire a conflicting lock.
- Lock::ResourceLock rstl(opCtx1->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(opCtx1, resourceIdReplicationStateTransitionLock, MODE_X);
auto result = runTaskAndKill(opCtx2, [&]() {
// Killing the lock wait should throw an exception.
@@ -840,7 +840,7 @@ TEST_F(DConcurrencyTestFixture,
// The main thread takes an exclusive lock, causing the spawned thread to wait when it attempts
// to acquire a conflicting lock.
- Lock::ResourceLock rstl(opCtx1->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(opCtx1, resourceIdReplicationStateTransitionLock, MODE_X);
// Acquire this later to confirm that it stays unlocked.
boost::optional<Lock::GlobalLock> g2 = boost::none;
@@ -900,7 +900,7 @@ TEST_F(DConcurrencyTestFixture, SetMaxLockTimeoutMillisAndNotUsingInterruptBehav
auto opCtx2 = clients[1].second.get();
// Take the exclusive lock with the first caller.
- Lock::ResourceLock rstl(opCtx1->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(opCtx1, resourceIdReplicationStateTransitionLock, MODE_X);
// Set a max timeout on the second caller that will override provided lock request deadlines.
// Then requesting a lock with Date_t::max() should cause a LockTimeout error to be thrown
@@ -945,7 +945,7 @@ TEST_F(DConcurrencyTestFixture,
auto opCtx2 = clients[1].second.get();
// Take the exclusive lock with the first caller.
- Lock::ResourceLock rstl(opCtx1->lockState(), resourceIdReplicationStateTransitionLock, MODE_X);
+ Lock::ResourceLock rstl(opCtx1, resourceIdReplicationStateTransitionLock, MODE_X);
// Set a max timeout on the second caller that will override provided lock request deadlines.
// Then requesting a lock with Date_t::max() should cause a LockTimeout error to be thrown.
@@ -2336,15 +2336,13 @@ TEST_F(DConcurrencyTestFixture, PBWMRespectsMaxTimeMS) {
auto clientOpCtxPairs = makeKClientsWithLockers(2);
auto opCtx1 = clientOpCtxPairs[0].second.get();
- Lock::ResourceLock pbwm1(
- opCtx1, opCtx1->lockState(), resourceIdParallelBatchWriterMode, MODE_X);
+ Lock::ResourceLock pbwm1(opCtx1, resourceIdParallelBatchWriterMode, MODE_X);
auto opCtx2 = clientOpCtxPairs[1].second.get();
opCtx2->setDeadlineAfterNowBy(Seconds{1}, ErrorCodes::ExceededTimeLimit);
- ASSERT_THROWS_CODE(
- Lock::ResourceLock(opCtx2, opCtx2->lockState(), resourceIdParallelBatchWriterMode, MODE_X),
- AssertionException,
- ErrorCodes::ExceededTimeLimit);
+ ASSERT_THROWS_CODE(Lock::ResourceLock(opCtx2, resourceIdParallelBatchWriterMode, MODE_X),
+ AssertionException,
+ ErrorCodes::ExceededTimeLimit);
}
TEST_F(DConcurrencyTestFixture, DifferentTenantsTakeDBLockOnConflictingNamespaceOk) {
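Because SharedLock no longer exposes a public unlock(), the rewritten ResourceMutex test above holds the lock in a boost::optional and drives it with emplace()/reset(). Sketch of that idiom (clients comes from makeKClientsWithLockers as in the test):

    boost::optional<Lock::SharedLock> lk;
    lk.emplace(clients[0].second.get(), mtx);   // acquire MODE_IS via the opCtx constructor
    // ... work under the shared lock ...
    lk.reset();                                 // release through the destructor path
    lk.emplace(clients[0].second.get(), mtx);   // later re-acquisition replaces lk.lock(...)
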
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index b5e53e339d3..36f6ba7ebd6 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/commands.h"
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/concurrency/locker.h"
#include "mongo/db/json.h"
#include "mongo/db/prepare_conflict_tracker.h"
#include "mongo/db/profile_filter.h"
@@ -60,11 +59,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
-
-using std::string;
-
namespace {
auto& oplogGetMoreStats = makeServerStatusMetric<TimerStats>("repl.network.oplogGetMoresProcessed");
@@ -1623,7 +1618,7 @@ void OpDebug::AdditiveMetrics::incrementPrepareReadConflicts(long long n) {
prepareReadConflicts.fetchAndAdd(n);
}
-string OpDebug::AdditiveMetrics::report() const {
+std::string OpDebug::AdditiveMetrics::report() const {
StringBuilder s;
OPDEBUG_TOSTRING_HELP_OPTIONAL("keysExamined", keysExamined);
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index df6e8ef8cbd..2ed5da7e80b 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -27,19 +27,14 @@
* it in the license file.
*/
-
#include "mongo/db/cursor_manager.h"
-#include <memory>
-
#include "mongo/base/data_cursor.h"
#include "mongo/base/init.h"
#include "mongo/db/allocate_cursor_id.h"
#include "mongo/db/audit.h"
#include "mongo/db/auth/authorization_checks.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/cursor_server_params.h"
@@ -57,7 +52,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
namespace mongo {
static CounterMetric cursorStatsLifespanLessThan1Second{"cursor.lifespan.lessThan1Second"};
diff --git a/src/mongo/db/db_raii_test.cpp b/src/mongo/db/db_raii_test.cpp
index f49bca93985..5a60b752977 100644
--- a/src/mongo/db/db_raii_test.cpp
+++ b/src/mongo/db/db_raii_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <string>
#include "mongo/db/catalog/catalog_test_fixture.h"
@@ -47,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index eaaafed2abe..8265323458b 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -31,7 +31,6 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/util/uuid.h"
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index 537eb6916d1..351f660191b 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -31,7 +31,6 @@
#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/commit_quorum_options.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/index_build_entry_gen.h"
#include "mongo/db/catalog/local_oplog_info.h"
#include "mongo/db/catalog_raii.h"
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 8b70f00cc20..4e0eb2f7451 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/catalog/index_build_entry_gen.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/exception_util.h"
-#include "mongo/db/concurrency/locker.h"
#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index e0d8611b420..d5112167610 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -43,7 +43,6 @@
#include "mongo/db/catalog/index_builds.h"
#include "mongo/db/catalog/index_builds_manager.h"
#include "mongo/db/commands/server_status.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/index/column_key_generator.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/rebuild_indexes.h"
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 4eeb20f000d..183c13a1c32 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -458,7 +458,7 @@ Status IndexBuildsCoordinatorMongod::voteCommitIndexBuild(OperationContext* opCt
// commit quorum is disabled, do not record their entry into the commit ready nodes.
// If we fail to retrieve the persisted commit quorum, the index build might be in the
// middle of tearing down.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
+ Lock::SharedLock commitQuorumLk(opCtx, *replState->commitQuorumLock);
auto commitQuorum =
uassertStatusOK(indexbuildentryhelpers::getCommitQuorum(opCtx, buildUUID));
if (commitQuorum.numNodes == CommitQuorumOptions::kDisabled) {
@@ -500,7 +500,7 @@ void IndexBuildsCoordinatorMongod::_signalIfCommitQuorumIsSatisfied(
// Acquire the commitQuorumLk in shared mode to make sure commit quorum value did not change
// after reading it from config.system.indexBuilds collection.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
+ Lock::SharedLock commitQuorumLk(opCtx, *replState->commitQuorumLock);
// Read the index builds entry from config.system.indexBuilds collection.
auto swIndexBuildEntry =
@@ -547,7 +547,7 @@ bool IndexBuildsCoordinatorMongod::_signalIfCommitQuorumNotEnabled(
// Acquire the commitQuorumLk in shared mode to make sure commit quorum value did not change
// after reading it from config.system.indexBuilds collection.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
+ Lock::SharedLock commitQuorumLk(opCtx, *replState->commitQuorumLock);
// Read the commit quorum value from config.system.indexBuilds collection.
auto commitQuorum = uassertStatusOKWithContext(
@@ -875,7 +875,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
// About to update the commit quorum value on-disk. So, take the lock in exclusive mode to
// prevent readers from reading the commit quorum value and making decision on commit quorum
// satisfied with the stale read commit quorum value.
- Lock::ExclusiveLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
+ Lock::ExclusiveLock commitQuorumLk(opCtx, *replState->commitQuorumLock);
{
if (auto action = replState->getNextActionNoWait()) {
return Status(ErrorCodes::CommandFailed,
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 0270aef7d24..8cbe5909154 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -52,7 +52,6 @@
#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/collection_write_path.h"
#include "mongo/db/catalog/create_collection.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder_impl.h"
#include "mongo/db/catalog/health_log.h"
#include "mongo/db/catalog/index_catalog.h"
diff --git a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
index e4cdf236d38..ee6980c27dd 100644
--- a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp
@@ -250,7 +250,7 @@ void ReplicaSetNodeProcessInterface::_attachGenericCommandArgs(OperationContext*
bool ReplicaSetNodeProcessInterface::_canWriteLocally(OperationContext* opCtx,
const NamespaceString& ns) const {
- Lock::ResourceLock rstl(opCtx->lockState(), resourceIdReplicationStateTransitionLock, MODE_IX);
+ Lock::ResourceLock rstl(opCtx, resourceIdReplicationStateTransitionLock, MODE_IX);
return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns);
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 817b2782168..049eb3b63a7 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -36,7 +36,6 @@
#include "mongo/base/error_codes.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/client.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp
index a6f8029be24..0a6dade23af 100644
--- a/src/mongo/db/repair.cpp
+++ b/src/mongo/db/repair.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <algorithm>
#include <fmt/format.h>
@@ -42,7 +39,6 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_validation.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_key_validate.h"
@@ -63,7 +59,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage
-
namespace mongo {
using namespace fmt::literals;
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index 353d1965f8f..032e9109f14 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -502,7 +502,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
// Stop all readers until we're done. This also prevents doc-locking engines from deleting old
// entries from the oplog until we finish writing.
- Lock::ParallelBatchWriterMode pbwm(opCtx->lockState());
+ Lock::ParallelBatchWriterMode pbwm(opCtx);
invariant(_replCoord);
if (_replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped) {
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 2dc34f593ee..f7c3fb8a0df 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -31,7 +31,6 @@
#include <deque>
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/oplog_applier.h"
#include "mongo/db/repl/replication_coordinator_external_state.h"
@@ -43,8 +42,6 @@
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
-class ServiceContext;
-
namespace repl {
class DropPendingCollectionReaper;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 223c3fc9b90..0ad0385ae6d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -36,7 +36,6 @@
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/replication_state_transition_lock_guard.h"
#include "mongo/db/repl/delayable_timeout_callback.h"
#include "mongo/db/repl/initial_syncer.h"
diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h
index a2aaff9153b..aff0634f035 100644
--- a/src/mongo/db/repl/storage_interface.h
+++ b/src/mongo/db/repl/storage_interface.h
@@ -45,12 +45,6 @@
#include "mongo/db/service_context.h"
namespace mongo {
-
-class Collection;
-class CollectionPtr;
-struct CollectionOptions;
-class OperationContext;
-
namespace repl {
struct TimestampedBSONObj {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 9c9038c3846..5901de19cd9 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -913,8 +913,8 @@ SemiFuture<void> TenantMigrationRecipientService::Instance::_initializeStateDoc(
.then([this, self = shared_from_this(), stateDoc = _stateDoc] {
auto opCtx = cc().makeOperationContext();
{
- Lock::ExclusiveLock stateDocInsertLock(
- opCtx.get(), opCtx->lockState(), _recipientService->_stateDocInsertMutex);
+ Lock::ExclusiveLock stateDocInsertLock(opCtx.get(),
+ _recipientService->_stateDocInsertMutex);
uassertStatusOK(
tenantMigrationRecipientEntryHelpers::insertStateDoc(opCtx.get(), stateDoc));
}
diff --git a/src/mongo/db/s/balancer_stats_registry.cpp b/src/mongo/db/s/balancer_stats_registry.cpp
index c2308b26d2d..bfd789acb52 100644
--- a/src/mongo/db/s/balancer_stats_registry.cpp
+++ b/src/mongo/db/s/balancer_stats_registry.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-
#include "mongo/db/s/balancer_stats_registry.h"
#include "mongo/db/dbdirectclient.h"
@@ -41,7 +40,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -73,7 +71,7 @@ ScopedRangeDeleterLock::ScopedRangeDeleterLock(OperationContext* opCtx, const UU
: _configLock(opCtx, DatabaseName(boost::none, NamespaceString::kConfigDb), MODE_IX),
_rangeDeletionLock(opCtx, NamespaceString::kRangeDeletionNamespace, MODE_IX),
_collectionUuidLock(Lock::ResourceLock(
- opCtx->lockState(),
+ opCtx,
ResourceId(RESOURCE_MUTEX, "RangeDeleterCollLock::" + collectionUuid.toString()),
MODE_X)) {}
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index 47ed17a914e..94db822cf92 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -30,7 +30,6 @@
#pragma once
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/metadata_manager.h"
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index d85ab9fc272..a717b687596 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -498,7 +498,7 @@ Status ShardingCatalogManager::setFeatureCompatibilityVersionOnShards(OperationC
const BSONObj& cmdObj) {
// No shards should be added until we have forwarded featureCompatibilityVersion to all shards.
- Lock::SharedLock lk(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock lk(opCtx, _kShardMembershipLock);
// We do a direct read of the shards collection with local readConcern so no shards are missed,
// but don't go through the ShardRegistry to prevent it from caching data that may be rolled
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index b7ceee4717f..23083a77590 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -625,7 +625,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
@@ -848,7 +848,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
// 1. Retrieve the initial collection version info to build up the logging info.
auto collVersion = uassertStatusOK(getCollectionVersion(opCtx, nss));
@@ -997,7 +997,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
// Must hold the shard lock until the entire commit finishes to serialize with removeShard.
- Lock::SharedLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock shardLock(opCtx, _kShardMembershipLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto shardResult = uassertStatusOK(
@@ -1019,7 +1019,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto findCollResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -1290,7 +1290,7 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations.
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const auto coll = [&] {
@@ -1426,7 +1426,7 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
@@ -1547,7 +1547,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
ScopeGuard earlyReturnBeforeDoingWriteGuard([&] {
// Ensure waiting for writeConcern of the data read.
@@ -1748,7 +1748,7 @@ void ShardingCatalogManager::bumpMultipleCollectionVersionsAndChangeMetadataInTx
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(
opCtx,
@@ -1792,7 +1792,7 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
// means that a subsequent incremental refresh will not see it. However, it is being
// marked in memory through the call to 'markAsJumbo' above so subsequent balancer
// iterations will not consider it for migration.
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto findCollResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
opCtx,
@@ -1857,7 +1857,7 @@ void ShardingCatalogManager::setAllowMigrationsAndBumpOneChunk(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto cm = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
@@ -1934,7 +1934,7 @@ void ShardingCatalogManager::setChunkEstimatedSize(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto chunkQuery = BSON(ChunkType::collectionUUID()
<< chunk.getCollectionUUID() << ChunkType::min(chunk.getMin())
@@ -1959,7 +1959,7 @@ void ShardingCatalogManager::setChunkEstimatedSize(OperationContext* opCtx,
bool ShardingCatalogManager::clearChunkEstimatedSize(OperationContext* opCtx, const UUID& uuid) {
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto query = BSON(ChunkType::collectionUUID() << uuid);
const auto update = BSON("$unset" << BSON(ChunkType::estimatedSizeBytes() << ""));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 5624b5dec37..19908f5d068 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -297,8 +297,8 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock chunkLk(opCtx, opCtx->lockState(), _kChunkOpLock);
- Lock::ExclusiveLock zoneLk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock chunkLk(opCtx, _kChunkOpLock);
+ Lock::ExclusiveLock zoneLk(opCtx, _kZoneOpLock);
struct RefineTimers {
Timer executionTimer;
@@ -546,7 +546,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
{
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(opCtx,
CollectionType::ConfigNS,
@@ -592,7 +592,7 @@ void ShardingCatalogManager::applyLegacyConfigurationToSessionsCollection(Operat
auto updateStmt = BSON("$unset" << BSON(CollectionType::kMaxChunkSizeBytesFieldName << 0));
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(opCtx,
CollectionType::ConfigNS,
@@ -641,8 +641,8 @@ void ShardingCatalogManager::renameShardedMetadata(
boost::optional<CollectionType> optFromCollType) {
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock chunkLk(opCtx, opCtx->lockState(), _kChunkOpLock);
- Lock::ExclusiveLock zoneLk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock chunkLk(opCtx, _kChunkOpLock);
+ Lock::ExclusiveLock zoneLk(opCtx, _kZoneOpLock);
std::string logMsg = str::stream() << from << " to " << to;
if (optFromCollType) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index c3bb155e702..95328a9b248 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -251,7 +251,7 @@ void ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
const DatabaseVersion& expectedDbVersion,
const ShardId& toShard) {
// Hold the shard lock until the entire commit finishes to serialize with removeShard.
- Lock::SharedLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock shardLock(opCtx, _kShardMembershipLock);
const auto updateOp = [&] {
const auto query = [&] {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 355c0b5520d..bd170413ab5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -585,7 +585,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Only one addShard operation can be in progress at a time.
- Lock::ExclusiveLock lk(opCtx->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock lk(opCtx, _kShardMembershipLock);
// Check if this shard has already been added (can happen in the case of a retry after a network
// error, for example) and thus this addShard request should be considered a no-op.
@@ -804,7 +804,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock shardLock(opCtx, _kShardMembershipLock);
auto findShardResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -955,7 +955,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
}
Lock::SharedLock ShardingCatalogManager::enterStableTopologyRegion(OperationContext* opCtx) {
- return Lock::SharedLock(opCtx->lockState(), _kShardMembershipLock);
+ return Lock::SharedLock(opCtx, _kShardMembershipLock);
}
void ShardingCatalogManager::appendConnectionStats(executor::ConnectionPoolStats* stats) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index e8739c4ec62..049a73d2cf4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -213,7 +213,7 @@ Status checkForTimeseriesTimeFieldKeyRange(const ChunkRange& range, StringData t
Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
@@ -238,7 +238,7 @@ Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
@@ -316,7 +316,7 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto zoneDoc = uassertStatusOK(configServer->exhaustiveFindOnConfig(
opCtx,
@@ -375,7 +375,7 @@ void ShardingCatalogManager::removeKeyRangeFromZone(OperationContext* opCtx,
const ChunkRange& givenRange) {
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
ChunkRange actualRange = givenRange;
KeyPattern keyPattern;
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index da908b6ea38..541df998f01 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -77,7 +77,7 @@ const ServiceContext::Decoration<DatabaseShardingStateMap> DatabaseShardingState
DatabaseShardingState::ScopedDatabaseShardingState::ScopedDatabaseShardingState(
OperationContext* opCtx, const DatabaseName& dbName, LockMode mode)
- : _lock(nullptr, opCtx->lockState(), ResourceId(RESOURCE_MUTEX, dbName), mode),
+ : _lock(opCtx->lockState(), ResourceId(RESOURCE_MUTEX, dbName), mode),
_dss(DatabaseShardingStateMap::get(opCtx->getServiceContext()).getOrCreate(dbName)) {}
DatabaseShardingState::ScopedDatabaseShardingState::ScopedDatabaseShardingState(
diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h
index 32a1b4210fc..c8d94b4bdf6 100644
--- a/src/mongo/db/s/persistent_task_queue.h
+++ b/src/mongo/db/s/persistent_task_queue.h
@@ -120,8 +120,6 @@ PersistentTaskQueue<T>::PersistentTaskQueue(OperationContext* opCtx, NamespaceSt
template <typename T>
TaskId PersistentTaskQueue<T>::push(OperationContext* opCtx, const T& t) {
- DBDirectClient dbClient(opCtx);
-
TaskId recordId = 0;
BSONObjBuilder builder;
@@ -134,6 +132,7 @@ TaskId PersistentTaskQueue<T>::push(OperationContext* opCtx, const T& t) {
builder.append("_id", recordId);
builder.append("task", t.toBSON());
+ DBDirectClient dbClient(opCtx);
auto response = write_ops::checkWriteErrors(
dbClient.insert(write_ops::InsertCommandRequest(_storageNss, {builder.obj()})));
_count++;
@@ -171,13 +170,12 @@ TaskId PersistentTaskQueue<T>::pop(OperationContext* opCtx) {
template <typename T>
const typename BlockingTaskQueue<T>::Record& PersistentTaskQueue<T>::peek(OperationContext* opCtx) {
- DBDirectClient client(opCtx);
-
Lock::ExclusiveLock lock(opCtx->lockState(), _mutex);
opCtx->waitForConditionOrInterrupt(_cv, lock, [this] { return _count > 0 || _closed; });
uassert(ErrorCodes::Interrupted, "Task queue was closed", !_closed);
+ DBDirectClient client(opCtx);
_currentFront = _loadNextRecord(client);
uassert(ErrorCodes::InternalError, "Task queue is in an invalid state.", _currentFront);
@@ -195,6 +193,7 @@ void PersistentTaskQueue<T>::close(OperationContext* opCtx) {
template <typename T>
size_t PersistentTaskQueue<T>::size(OperationContext* opCtx) const {
Lock::ExclusiveLock lock(opCtx->lockState(), _mutex);
+
return _count;
}
diff --git a/src/mongo/db/s/persistent_task_queue_test.cpp b/src/mongo/db/s/persistent_task_queue_test.cpp
index 17b7a568613..06e9ad5514d 100644
--- a/src/mongo/db/s/persistent_task_queue_test.cpp
+++ b/src/mongo/db/s/persistent_task_queue_test.cpp
@@ -241,8 +241,8 @@ TEST_F(PersistentTaskQueueTest, TestWakeupOnEmptyQueue) {
auto opCtx = operationContext();
PersistentTaskQueue<TestTask> q(opCtx, kNss);
- auto result = stdx::async(stdx::launch::async, [&q] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q] {
+ ThreadClient tc("TestWakeupOnEmptyQueue", getServiceContext());
auto opCtx = tc->makeOperationContext();
stdx::this_thread::sleep_for(stdx::chrono::milliseconds(500));
@@ -261,8 +261,8 @@ TEST_F(PersistentTaskQueueTest, TestInterruptedWhileWaitingOnCV) {
unittest::Barrier barrier(2);
- auto result = stdx::async(stdx::launch::async, [opCtx, &q, &barrier] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] {
+ ThreadClient tc("TestInterruptedWhileWaitingOnCV", getServiceContext());
auto opCtx = tc->makeOperationContext();
barrier.countDownAndWait();
@@ -285,8 +285,8 @@ TEST_F(PersistentTaskQueueTest, TestKilledOperationContextWhileWaitingOnCV) {
unittest::Barrier barrier(2);
- auto result = stdx::async(stdx::launch::async, [opCtx, &q, &barrier] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] {
+ ThreadClient tc("TestKilledOperationContextWhileWaitingOnCV", getServiceContext());
{
stdx::lock_guard<Client> lk(*tc.get());
tc->setSystemOperationKillableByStepdown(lk);
diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h
index d720a0ab83a..dd78e97db6f 100644
--- a/src/mongo/db/s/range_deletion_util.h
+++ b/src/mongo/db/s/range_deletion_util.h
@@ -31,7 +31,6 @@
#include <boost/optional.hpp>
#include <list>
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/range_deletion_task_gen.h"
#include "mongo/executor/task_executor.h"
diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp
index f7d20c6d813..5e3721c89f2 100644
--- a/src/mongo/db/s/resharding/resharding_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_util.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/resharding/resharding_util.h"
#include <fmt/format.h>
@@ -61,7 +58,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding
-
namespace mongo {
namespace resharding {
diff --git a/src/mongo/db/s/resharding/resharding_util.h b/src/mongo/db/s/resharding/resharding_util.h
index 7c021858971..1b55c70a755 100644
--- a/src/mongo/db/s/resharding/resharding_util.h
+++ b/src/mongo/db/s/resharding/resharding_util.h
@@ -43,7 +43,6 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/resharding/common_types_gen.h"
-#include "mongo/util/str.h"
namespace mongo {
namespace resharding {
@@ -298,6 +297,7 @@ boost::optional<Milliseconds> estimateRemainingRecipientTime(bool applyingBegan,
int64_t oplogEntriesApplied,
int64_t oplogEntriesFetched,
Milliseconds timeSpentApplying);
+
/**
* Looks up the StateMachine by namespace of the collection being resharded. If it does not exist,
* returns boost::none.
@@ -322,5 +322,4 @@ std::vector<std::shared_ptr<Instance>> getReshardingStateMachines(OperationConte
}
} // namespace resharding
-
} // namespace mongo
diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
index 1c3018df228..26d9337599e 100644
--- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
+++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h
@@ -32,12 +32,10 @@
#include <boost/shared_array.hpp>
#include <map>
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
-
namespace mongo {
/**
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 7207a773d6b..2ae24e609d2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -39,7 +39,6 @@
#include "mongo/bson/ordering.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/backup_block.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/kv/kv_engine.h"
diff --git a/src/mongo/db/timeseries/bucket_catalog.h b/src/mongo/db/timeseries/bucket_catalog.h
index c3dc40c915d..ea776644a58 100644
--- a/src/mongo/db/timeseries/bucket_catalog.h
+++ b/src/mongo/db/timeseries/bucket_catalog.h
@@ -29,11 +29,11 @@
#pragma once
-#include "mongo/bson/bsonobjbuilder.h"
#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>
#include <queue>
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/unordered_fields_bsonobj_comparator.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/ops/single_write_result_gen.h"
diff --git a/src/mongo/db/transaction/transaction_participant.h b/src/mongo/db/transaction/transaction_participant.h
index a76851815c6..ad7d1cb14ff 100644
--- a/src/mongo/db/transaction/transaction_participant.h
+++ b/src/mongo/db/transaction/transaction_participant.h
@@ -37,7 +37,6 @@
#include "mongo/db/catalog/uncommitted_multikey.h"
#include "mongo/db/commands/txn_cmds_gen.h"
#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/concurrency/locker.h"
#include "mongo/db/multi_key_path_tracker.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/repl/oplog.h"
@@ -61,8 +60,6 @@
namespace mongo {
-class OperationContext;
-
/**
* Reason a transaction was terminated.
*/
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index bfdfc2cd1ab..99507685e8e 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/dbtests/framework.h"
#include <string>
@@ -40,7 +37,6 @@
#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/database_holder_impl.h"
#include "mongo/db/client.h"
-#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/index/index_access_method_factory_impl.h"
#include "mongo/db/index_builds_coordinator_mongod.h"
@@ -65,7 +61,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace dbtests {