Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/concurrency/lock_manager.h       |   8
-rw-r--r--  src/mongo/db/concurrency/lock_manager_defs.h  |   9
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp       |  17
-rw-r--r--  src/mongo/db/concurrency/lock_state.h         |   1
-rw-r--r--  src/mongo/db/concurrency/lock_state_test.cpp  |  55
-rw-r--r--  src/mongo/db/concurrency/lock_stats.cpp       |  18
-rw-r--r--  src/mongo/db/concurrency/lock_stats.h         |   8
-rw-r--r--  src/mongo/db/concurrency/lock_stats_test.cpp  |   5
8 files changed, 11 insertions, 110 deletions
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index 75922ac7c18..22749de9b5a 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -73,10 +73,10 @@ public:
* return value is not LOCK_WAITING, this pointer can be freed and will
* not be used any more.
*
- * If the return value is LOCK_WAITING, the notification method will be
- * called at some point into the future, when the lock either becomes
- * granted or a deadlock is discovered. If unlock is called before the
- * lock becomes granted, the notification will not be invoked.
+ * If the return value is LOCK_WAITING, the notification method will be called
+ * at some point into the future, when the lock becomes granted. If unlock is
+ * called before the lock becomes granted, the notification will not be
+ * invoked.
*
* If the return value is LOCK_WAITING, the notification object *must*
* live at least until the notify method has been invoked or unlock has
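
To make the revised contract concrete, here is a minimal caller-side sketch (an editorial illustration, not code from this commit) of the two-step acquisition described above, driven through LockerImpl. It uses only the calls that appear in the test changes later in this diff, and assumes their signatures match those call sites.

    LockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));

    const ResourceId db(RESOURCE_DATABASE, "db1"_sd);
    if (locker.lockBegin(nullptr, db, MODE_X) == LOCK_WAITING) {
        // With deadlock detection removed, the wait ends in exactly one of two
        // ways: LOCK_OK when the lock is granted, or LOCK_TIMEOUT when the
        // deadline passes before it can be granted.
        LockResult result = locker.lockComplete(db, MODE_X, Date_t::now() + Milliseconds(500));
        if (result != LOCK_OK) {
            // Timed out: nothing was acquired on 'db'.
        }
    }
    locker.unlockGlobal();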
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 4dabc7f7790..130a4e7af21 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -130,15 +130,6 @@ enum LockResult {
LOCK_TIMEOUT,
/**
- * The lock request was not granted because it would result in a deadlock. No changes to
- * the state of the Locker would be made if this value is returned (i.e., it will not be
- * killed due to deadlock). It is up to the caller to decide how to recover from this
- * return value - could be either release some locks and try again, or just bail with an
- * error and have some upper code handle it.
- */
- LOCK_DEADLOCK,
-
- /**
* This is used as an initializer value. Should never be returned.
*/
LOCK_INVALID
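
For reference, a hypothetical caller-side dispatch (illustration only, not part of this commit) over the LockResult values that remain once LOCK_DEADLOCK is gone:

    switch (result) {
        case LOCK_OK:
            break;  // granted immediately
        case LOCK_WAITING:
            // The request is queued; lockComplete() (or unlock()) finishes it.
            break;
        case LOCK_TIMEOUT:
            // The deadline expired before the request could be granted.
            break;
        case LOCK_INVALID:
            MONGO_UNREACHABLE;  // initializer value only, never returned
    }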
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 902afbda01b..555a031570c 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -71,10 +71,6 @@ public:
_get(id).recordWaitTime(resId, mode, waitMicros);
}
- void recordDeadlock(ResourceId resId, LockMode mode) {
- _get(resId).recordDeadlock(resId, mode);
- }
-
void report(SingleThreadedLockStats* outStats) const {
for (int i = 0; i < NumPartitions; i++) {
outStats->append(_partitions[i].stats);
@@ -114,7 +110,7 @@ LockManager globalLockManager;
const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
// How often (in millis) to check for deadlock if a lock has not been granted for some time
-const Milliseconds DeadlockTimeout = Milliseconds(500);
+const Milliseconds MaxWaitTime = Milliseconds(500);
// Dispenses unique LockerId identifiers
AtomicUInt64 idCounter(0);
@@ -361,8 +357,6 @@ LockResult LockerImpl::_lockGlobalBegin(OperationContext* opCtx, LockMode mode,
if (result == LOCK_OK)
return LOCK_OK;
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
invariant(result == LOCK_WAITING);
return result;
@@ -433,8 +427,6 @@ LockResult LockerImpl::lock(OperationContext* opCtx,
if (result == LOCK_OK)
return LOCK_OK;
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
invariant(result == LOCK_WAITING);
return lockComplete(opCtx, resId, mode, deadline);
@@ -758,9 +750,8 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
timeout = std::min(timeout, _maxLockTimeout.get());
}
- // Don't go sleeping without bound in order to be able to report long waits or wake up for
- // deadlock detection.
- Milliseconds waitTime = std::min(timeout, DeadlockTimeout);
+ // Don't go sleeping without bound in order to be able to report long waits.
+ Milliseconds waitTime = std::min(timeout, MaxWaitTime);
const uint64_t startOfTotalWaitTime = curTimeMicros64();
uint64_t startOfCurrentWaitTime = startOfTotalWaitTime;
@@ -800,7 +791,7 @@ LockResult LockerImpl::lockComplete(OperationContext* opCtx,
const auto totalBlockTime = duration_cast<Milliseconds>(
Microseconds(int64_t(curTimeMicros - startOfTotalWaitTime)));
- waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, DeadlockTimeout)
+ waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime)
: Milliseconds(0);
if (waitTime == Milliseconds(0)) {
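
Pulled out of the loop above for clarity, the per-pass wait bound now reduces to the following helper-style computation (an editorial sketch, not code from this commit): each pass blocks for at most MaxWaitTime so that long waits can still be reported, and never sleeps past the caller's overall timeout.

    Milliseconds remainingWait(Milliseconds timeout, Milliseconds totalBlockTime) {
        const Milliseconds MaxWaitTime = Milliseconds(500);
        return (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime)
                                          : Milliseconds(0);
    }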
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 6d7278153ac..dc5f73e7273 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -231,7 +231,6 @@ public:
* @param mode Mode which was passed to an earlier lockBegin call. Must match.
* @param deadline The absolute time point when this lock acquisition will time out, if not yet
* granted.
- * @param checkDeadlock whether to perform deadlock detection while waiting.
*/
LockResult lockComplete(OperationContext* opCtx,
ResourceId resId,
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index ba3c3e32079..e2055f11724 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -236,57 +236,6 @@ TEST(LockerImpl, DefaultLocker) {
ASSERT(locker.unlockGlobal());
}
-TEST(LockerImpl, CanceledDeadlockUnblocks) {
- const ResourceId db1(RESOURCE_DATABASE, "db1"_sd);
- const ResourceId db2(RESOURCE_DATABASE, "db2"_sd);
-
- LockerImpl locker1;
- LockerImpl locker2;
- LockerImpl locker3;
-
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
- ASSERT(LOCK_OK == locker1.lock(db1, MODE_S));
-
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
- ASSERT(LOCK_OK == locker2.lock(db2, MODE_X));
-
- // Set up locker1 and locker2 for deadlock
- ASSERT(LOCK_WAITING == locker1.lockBegin(nullptr, db2, MODE_X));
- ASSERT(LOCK_WAITING == locker2.lockBegin(nullptr, db1, MODE_X));
-
- // Locker3 blocks behind locker 2
- ASSERT(LOCK_OK == locker3.lockGlobal(MODE_IX));
- ASSERT(LOCK_WAITING == locker3.lockBegin(nullptr, db1, MODE_S));
-
- // Detect deadlock, canceling our request
- ASSERT(
- LOCK_DEADLOCK ==
- locker2.lockComplete(db1, MODE_X, Date_t::now() + Milliseconds(1), /*checkDeadlock*/ true));
-
- // Now locker3 must be able to complete its request
- ASSERT(LOCK_OK ==
- locker3.lockComplete(
- db1, MODE_S, Date_t::now() + Milliseconds(1), /*checkDeadlock*/ false));
-
- // Locker1 still can't complete its request
- ASSERT(LOCK_TIMEOUT ==
- locker1.lockComplete(db2, MODE_X, Date_t::now() + Milliseconds(1), false));
-
- // Check ownership for db1
- ASSERT(locker1.getLockMode(db1) == MODE_S);
- ASSERT(locker2.getLockMode(db1) == MODE_NONE);
- ASSERT(locker3.getLockMode(db1) == MODE_S);
-
- // Check ownership for db2
- ASSERT(locker1.getLockMode(db2) == MODE_NONE);
- ASSERT(locker2.getLockMode(db2) == MODE_X);
- ASSERT(locker3.getLockMode(db2) == MODE_NONE);
-
- ASSERT(locker1.unlockGlobal());
- ASSERT(locker2.unlockGlobal());
- ASSERT(locker3.unlockGlobal());
-}
-
TEST(LockerImpl, SharedLocksShouldTwoPhaseLockIsTrue) {
// Test that when setSharedLocksShouldTwoPhaseLock is true and we are in a WUOW, unlock on IS
// and S locks are postponed until endWriteUnitOfWork() is called. Mode IX and X locks always
@@ -526,9 +475,7 @@ TEST(LockerImpl, GetLockerInfoShouldReportPendingLocks) {
ASSERT(successfulLocker.unlock(dbId));
ASSERT(successfulLocker.unlockGlobal());
- const bool checkDeadlock = false;
- ASSERT_EQ(LOCK_OK,
- conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now(), checkDeadlock));
+ ASSERT_EQ(LOCK_OK, conflictingLocker.lockComplete(collectionId, MODE_IS, Date_t::now()));
conflictingLocker.getLockerInfo(&lockerInfo, boost::none);
ASSERT_FALSE(lockerInfo.waitingResource.isValid());
diff --git a/src/mongo/db/concurrency/lock_stats.cpp b/src/mongo/db/concurrency/lock_stats.cpp
index 20049aeeea7..b4e1105c081 100644
--- a/src/mongo/db/concurrency/lock_stats.cpp
+++ b/src/mongo/db/concurrency/lock_stats.cpp
@@ -115,24 +115,6 @@ void LockStats<CounterType>::_report(BSONObjBuilder* builder,
}
}
}
-
- // Deadlocks
- {
- std::unique_ptr<BSONObjBuilder> deadlockCount;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
- if (value > 0) {
- if (!deadlockCount) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- deadlockCount.reset(new BSONObjBuilder(section->subobjStart("deadlockCount")));
- }
- deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
- }
- }
- }
}
template <typename CounterType>
diff --git a/src/mongo/db/concurrency/lock_stats.h b/src/mongo/db/concurrency/lock_stats.h
index b92ed49ccb8..6add6cfefa6 100644
--- a/src/mongo/db/concurrency/lock_stats.h
+++ b/src/mongo/db/concurrency/lock_stats.h
@@ -82,7 +82,6 @@ struct LockStatCounters {
CounterOps::add(numAcquisitions, other.numAcquisitions);
CounterOps::add(numWaits, other.numWaits);
CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
- CounterOps::add(numDeadlocks, other.numDeadlocks);
}
template <typename OtherType>
@@ -90,21 +89,18 @@ struct LockStatCounters {
CounterOps::add(numAcquisitions, -other.numAcquisitions);
CounterOps::add(numWaits, -other.numWaits);
CounterOps::add(combinedWaitTimeMicros, -other.combinedWaitTimeMicros);
- CounterOps::add(numDeadlocks, -other.numDeadlocks);
}
void reset() {
CounterOps::set(numAcquisitions, 0);
CounterOps::set(numWaits, 0);
CounterOps::set(combinedWaitTimeMicros, 0);
- CounterOps::set(numDeadlocks, 0);
}
CounterType numAcquisitions;
CounterType numWaits;
CounterType combinedWaitTimeMicros;
- CounterType numDeadlocks;
};
@@ -135,10 +131,6 @@ public:
CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
}
- void recordDeadlock(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numDeadlocks, 1);
- }
-
LockStatCountersType& get(ResourceId resId, LockMode mode) {
if (resId == resourceIdOplog) {
return _oplogStats.modeStats[mode];
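
With numDeadlocks dropped, each per-mode entry carries exactly three counters. The following is an illustration only (not from this commit), assuming the plain int64_t instantiation that the single-threaded stats presumably use, of how the counters accumulate and merge via append():

    LockStatCounters<int64_t> perMode;
    perMode.reset();  // plain members, so zero them explicitly before use
    CounterOps::add(perMode.numAcquisitions, 1);
    CounterOps::add(perMode.numWaits, 1);
    CounterOps::add(perMode.combinedWaitTimeMicros, 1500);  // 1.5 ms spent blocked

    LockStatCounters<int64_t> rollup;
    rollup.reset();
    rollup.append(perMode);  // adds all three counters into the rollup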
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index b14f86a1d95..e7a58641748 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -68,9 +68,8 @@ TEST(LockStats, Wait) {
ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(nullptr, resId, MODE_S));
// Sleep 1 millisecond so the wait time passes
- ASSERT_EQUALS(
- LOCK_TIMEOUT,
- lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5), false));
+ ASSERT_EQUALS(LOCK_TIMEOUT,
+ lockerConflict.lockComplete(resId, MODE_S, Date_t::now() + Milliseconds(5)));
}
// Make sure that the waits/blocks are non-zero