author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-01-27 13:01:45 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-02-11 08:50:52 -0500
commit     6e2d6ae03be6592d95334c43a726cde10247bd24 (patch)
tree       d8a2b6191d9589591b292a783a1c306201f2abf3 /src/mongo/db/concurrency/lock_state.cpp
parent     1c478891a220758259eff7fd6bb1dc0a253a9aa5 (diff)
SERVER-17039 Re-add db.currentOp locking stats
This reintroduces the per-operation locking statistics. Also reverts commit 9b1392162e0f7564cfc3b1634ab78ec1a7f7c871.
Diffstat (limited to 'src/mongo/db/concurrency/lock_state.cpp')
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index eee634857f0..f1f59ca8e14 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -53,26 +53,22 @@ namespace {
PartitionedInstanceWideLockStats() { }
void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
- LockStats& stats = _get(id);
- stats.recordAcquisition(resId, mode);
+ _get(id).recordAcquisition(resId, mode);
}
void recordWait(LockerId id, ResourceId resId, LockMode mode) {
- LockStats& stats = _get(id);
- stats.recordWait(resId, mode);
+ _get(id).recordWait(resId, mode);
}
void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
- LockStats& stats = _get(id);
- stats.recordWaitTime(resId, mode, waitMicros);
+ _get(id).recordWaitTime(resId, mode, waitMicros);
}
void recordDeadlock(ResourceId resId, LockMode mode) {
- LockStats& stats = _get(resId);
- stats.recordDeadlock(resId, mode);
+ _get(resId).recordDeadlock(resId, mode);
}
- void report(LockStats* outStats) const {
+ void report(SingleThreadedLockStats* outStats) const {
for (int i = 0; i < NumPartitions; i++) {
outStats->append(_partitions[i].stats);
}
@@ -87,16 +83,15 @@ namespace {
private:
// This alignment is a best effort approach to ensure that each partition falls on a
- // separate page/cache line in order to avoid false sharing. The 4096-byte alignment is
- // in an effort to play nicely with NUMA.
- struct MONGO_COMPILER_ALIGN_TYPE(4096) AlignedLockStats {
- LockStats stats;
+ // separate page/cache line in order to avoid false sharing.
+ struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
+ AtomicLockStats stats;
};
enum { NumPartitions = 8 };
- LockStats& _get(LockerId id) {
+ AtomicLockStats& _get(LockerId id) {
return _partitions[id % NumPartitions].stats;
}
@@ -198,10 +193,13 @@ namespace {
}
template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::assertEmpty() const {
+ void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
invariant(!inAWriteUnitOfWork());
invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
invariant(_requests.empty());
+
+ // Reset the locking statistics so the object can be reused
+ _stats.reset();
}
template<bool IsForMMAPV1>
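The renamed assertEmptyAndReset() keeps the old emptiness invariants but additionally clears the per-operation statistics, since Locker objects are reused and should not carry the previous operation's numbers into the next one. A rough sketch of that reuse pattern under assumed names (OperationLockStats, LockHolder), not the actual Locker interface:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Per-operation counters that travel with their owner and are cleared
    // when the owner is recycled for the next operation.
    struct OperationLockStats {
        uint64_t numAcquisitions = 0;
        uint64_t numWaits = 0;
        uint64_t combinedWaitMicros = 0;

        void reset() { *this = OperationLockStats(); }
    };

    class LockHolder {
    public:
        void recordAcquisition() { _stats.numAcquisitions++; }

        // Analogue of assertEmptyAndReset(): verify nothing is still held,
        // then clear the per-operation statistics so the object can be reused.
        void assertEmptyAndReset() {
            assert(_heldLocks.empty());
            _stats.reset();
        }

    private:
        std::vector<int> _heldLocks;
        OperationLockStats _stats;
    };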
@@ -283,7 +281,7 @@ namespace {
// Cannot delete the Locker while there are still outstanding requests, because the
// LockManager may attempt to access deleted memory. Besides it is probably incorrect
// to delete with unaccounted locks anyways.
- assertEmpty();
+ assertEmptyAndReset();
}
template<bool IsForMMAPV1>
@@ -339,6 +337,11 @@ namespace {
LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
invariant(globalLockRequest->mode == MODE_X);
invariant(globalLockRequest->recursiveCount == 1);
+
+ // Making this call here will record lock downgrades as acquisitions, which is acceptable
+ globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
+ _stats.recordAcquisition(resourceIdGlobal, MODE_S);
+
globalLockManager.downgrade(globalLockRequest, MODE_S);
if (IsForMMAPV1) {
@@ -863,7 +866,7 @@ namespace {
return &globalLockManager;
}
- void reportGlobalLockingStats(LockStats* outStats) {
+ void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
globalStats.report(outStats);
}
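The report path now takes a SingleThreadedLockStats rather than the concurrently updated per-partition type: writers increment atomic counters, while reporting folds those partitions into a plain snapshot that only the reporting thread touches. A sketch of that split, assuming a hypothetical Counters template (the real AtomicLockStats and SingleThreadedLockStats members are not shown in this diff):

    #include <atomic>
    #include <cstdint>

    // One counter layout, parameterized on the counter type. Concurrent
    // writers use the atomic instantiation; reporting copies the values into
    // the plain, single-threaded instantiation.
    template <typename CounterT>
    struct Counters {
        CounterT numAcquisitions{};
        CounterT numWaits{};
        CounterT combinedWaitMicros{};
    };

    using AtomicCounters = Counters<std::atomic<int64_t>>;
    using SingleThreadedCounters = Counters<int64_t>;

    // Fold one concurrently updated partition into a snapshot that only the
    // reporting thread reads, so later accesses need no synchronization.
    inline void append(SingleThreadedCounters& out, const AtomicCounters& in) {
        out.numAcquisitions += in.numAcquisitions.load(std::memory_order_relaxed);
        out.numWaits += in.numWaits.load(std::memory_order_relaxed);
        out.combinedWaitMicros += in.combinedWaitMicros.load(std::memory_order_relaxed);
    }

    // Usage: keep one AtomicCounters per partition and have the report
    // function call append() for each partition into a single result.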