summary refs log tree commit diff
path: root/src/mongo/db/concurrency/lock_state.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo/db/concurrency/lock_state.cpp')
-rw-r--r-- src/mongo/db/concurrency/lock_state.cpp | 95
1 files changed, 9 insertions, 86 deletions
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index c97430363c1..e4df0cb2528 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -36,7 +36,6 @@
#include <vector>
-#include "mongo/db/concurrency/replication_lock_manager_manipulator.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/compiler.h"
@@ -123,19 +122,9 @@ AtomicUInt64 idCounter(0);
// Partitioned global lock statistics, so we don't hit the same bucket
PartitionedInstanceWideLockStats globalStats;
-// When this is set, lock acquisitions for the global resource go into the TemporaryRequestQueue
-// stored in this decoration instead of letting the LockManager look up and put the requests into
-// the true global resource state.
-OperationContext::Decoration<LockManager::TemporaryResourceQueue*> globalResourceShadow =
- OperationContext::declareDecoration<LockManager::TemporaryResourceQueue*>();
-
} // namespace
bool LockerImpl::_shouldDelayUnlock(ResourceId resId, LockMode mode) const {
- if (_prepareModeForLockYields) {
- return false;
- }
-
switch (resId.getType()) {
case RESOURCE_MUTEX:
return false;
@@ -276,8 +265,7 @@ LockerImpl::~LockerImpl() {
invariant(!inAWriteUnitOfWork());
invariant(_numResourcesToUnlockAtEndUnitOfWork == 0);
invariant(_requests.empty());
- invariant(_modeForTicket == MODE_NONE,
- str::stream() << "_modeForTicket found: " << _modeForTicket);
+ invariant(_modeForTicket == MODE_NONE);
// Reset the locking statistics so the object can be reused
_stats.reset();
@@ -363,7 +351,7 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
}
LockMode actualLockMode = mode;
- if (opCtx && opCtx->getServiceContext()) {
+ if (opCtx) {
auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
if (storageEngine && !storageEngine->supportsDBLocking()) {
actualLockMode = isSharedLockMode(mode) ? MODE_S : MODE_X;
@@ -375,7 +363,7 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
// Currently, deadlock detection does not happen inline with lock acquisition so the only
// unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING, str::stream() << "Unexpected lock result: " << result);
+ invariant(result == LOCK_WAITING);
return result;
}
@@ -593,18 +581,9 @@ boost::optional<Locker::LockerInfo> LockerImpl::getLockerInfo(
return std::move(lockerInfo);
}
-bool LockerImpl::saveLockStateAndUnlockForPrepare(Locker::LockSnapshot* stateOut) {
- invariant(!_prepareModeForLockYields);
- _prepareModeForLockYields = true;
- ON_BLOCK_EXIT([&] { _prepareModeForLockYields = false; });
- return saveLockStateAndUnlock(stateOut);
-}
-
bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork, excepting the
- // special behavior for saving/restoring locks for prepared transactions during repl state
- // transitions.
- invariant(!inAWriteUnitOfWork() || _prepareModeForLockYields);
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
// Clear out whatever is in stateOut.
stateOut->locks.clear();
@@ -661,10 +640,8 @@ bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
}
void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSnapshot& state) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork, excepting the
- // special behavior for saving/restoring locks for prepared transactions during repl state
- // transitions.
- invariant(!inAWriteUnitOfWork() || _prepareModeForLockYields);
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
invariant(_modeForTicket == MODE_NONE);
std::vector<OneLock>::const_iterator it = state.locks.begin();
@@ -681,47 +658,6 @@ void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSna
invariant(_modeForTicket != MODE_NONE);
}
-void LockerImpl::restoreLockStateWithTemporaryGlobalResource(
- OperationContext* opCtx,
- const LockSnapshot& state,
- LockManager::TemporaryResourceQueue* tempGlobalResource) {
- invariant(tempGlobalResource->getResourceId().getType() == ResourceType::RESOURCE_GLOBAL);
- invariant(globalResourceShadow(opCtx) == nullptr);
- invariant(!_prepareModeForLockYields);
-
- globalResourceShadow(opCtx) = tempGlobalResource;
- _prepareModeForLockYields = true;
- ON_BLOCK_EXIT([&] {
- globalResourceShadow(opCtx) = nullptr;
- _prepareModeForLockYields = false;
- });
-
- restoreLockState(opCtx, state);
-}
-
-void LockerImpl::replaceGlobalLockStateWithTemporaryGlobalResource(
- LockManager::TemporaryResourceQueue* tempGlobalResource) {
- invariant(tempGlobalResource->getResourceId().getType() == ResourceType::RESOURCE_GLOBAL);
-
- // Transfer the LockRequests from tempGlobalResource into the true resource for the global lock
- // that is managed by the LockManager. This also removes the existing MODE_X LockRequest from
- // the granted list for the true global resource, but we still need to delete that LockRequest
- // and remove it from this Locker's _requests list.
- ReplicationLockManagerManipulator(&globalLockManager)
- .replaceGlobalLocksWithLocksFromTemporaryGlobalResource(resourceIdGlobal,
- tempGlobalResource);
-
- // Release the ticket that this Locker was holding for the global X lock.
- invariant(_modeForTicket == MODE_X);
- _releaseTicket();
- _modeForTicket = MODE_NONE;
-
- // Now fully delete the LockRequest.
- auto it = _requests.find(resourceIdGlobal);
- scoped_spinlock scopedLock(_lock);
- it.remove();
-}
-
LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) {
dassert(!getWaitingResource().isValid());
@@ -777,21 +713,8 @@ LockResult LockerImpl::lockBegin(OperationContext* opCtx, ResourceId resId, Lock
// otherwise we might reset state if the lock becomes granted very fast.
_notify.clear();
- LockResult result{LockResult::LOCK_INVALID};
- if (resId == resourceIdGlobal && opCtx && globalResourceShadow(opCtx)) {
- // If we're trying to lock the global resource and we have a temporary global resource
- // installed, use the temporary resource instead of letting the LockManager look up the
- // true resource for the global lock.
- invariant(isNew);
- ReplicationLockManagerManipulator(&globalLockManager)
- .lockUncontestedTemporaryGlobalResource(globalResourceShadow(opCtx), request, mode);
- // lockUncontestedTemporaryGlobalResource can't fail.
- result = LockResult::LOCK_OK;
- } else {
- // Normal case using the true global lock head.
- result = isNew ? globalLockManager.lock(resId, request, mode)
- : globalLockManager.convert(resId, request, mode);
- }
+ LockResult result = isNew ? globalLockManager.lock(resId, request, mode)
+ : globalLockManager.convert(resId, request, mode);
if (result == LOCK_WAITING) {
globalStats.recordWait(_id, resId, mode);