author     Geert Bosch <geert@mongodb.com>      2016-02-10 14:08:01 -0500
committer  Ramon Fernandez <rfmnyc@gmail.com>   2016-02-10 21:49:54 -0500
commit     aaa36d2ce713e4bde75f6b9e73fe91bcf20865f8
tree       be53e224d487f2e53388f545990e748dc64d7ac8
parent     ef07be66578231cc7bb7d2958ccb4fd4ad4dc391
SERVER-21859: ServerStatus should report clients queued on global/ticket lock
(cherry picked from commit f8dadcefeef7e738d7063ae2df57b9c856b402da)
 src/mongo/db/concurrency/lock_state.cpp           | 75
 src/mongo/db/concurrency/lock_state.h             | 11
 src/mongo/db/concurrency/locker.h                 | 11
 src/mongo/db/concurrency/locker_noop.h            |  4
 src/mongo/db/stats/lock_server_status_section.cpp | 42
 5 files changed, 77 insertions(+), 66 deletions(-)
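
The mechanics of the change, in brief: rather than having serverStatus infer each client's activity from the lock modes it holds, every Locker now publishes its own Locker::ClientState (kInactive, kActiveReader, kActiveWriter, kQueuedReader, kQueuedWriter) through an atomic word, flipping to a queued state while blocked on a ticket and back to inactive when the global lock is released. Below is a minimal standalone sketch of that state machine; TicketHolderStub and LockerSketch are simplified stand-ins for MongoDB's TicketHolder and LockerImpl, using plain std::atomic in place of AtomicWord.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Simplified stand-ins for MongoDB's types; the names mirror the diff,
    // but the bodies are illustrative sketches only.
    enum ClientState { kInactive, kActiveReader, kActiveWriter, kQueuedReader, kQueuedWriter };

    class TicketHolderStub {
    public:
        explicit TicketHolderStub(int tickets) : _available(tickets) {}

        void waitForTicket() {
            std::unique_lock<std::mutex> lk(_mu);
            _cv.wait(lk, [&] { return _available > 0; });
            --_available;
        }

        void release() {
            {
                std::lock_guard<std::mutex> lk(_mu);
                ++_available;
            }
            _cv.notify_one();
        }

    private:
        std::mutex _mu;
        std::condition_variable _cv;
        int _available;
    };

    class LockerSketch {
    public:
        // Mirrors lockGlobalBegin: advertise a queued state while blocked on a
        // ticket, then flip to active once the ticket is held.
        void acquireTicket(TicketHolderStub* holder, bool reader) {
            if (holder) {
                _clientState.store(reader ? kQueuedReader : kQueuedWriter);
                holder->waitForTicket();
            }
            _clientState.store(reader ? kActiveReader : kActiveWriter);
        }

        // Mirrors the global-lock branch of _unlockImpl: return the ticket,
        // then report the client as inactive.
        void releaseTicket(TicketHolderStub* holder) {
            if (holder) {
                holder->release();
            }
            _clientState.store(kInactive);
        }

        ClientState getClientState() const { return _clientState.load(); }

    private:
        std::atomic<ClientState> _clientState{kInactive};
    };

The real getClientState() in the diff adds one refinement this sketch omits: a client that already holds a ticket but is queued behind a lock request (hasLockPending()) is also reported as queued rather than active.
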
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 04512bce9bd..bb95284bd39 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -247,22 +247,6 @@ void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
namespace {
TicketHolder* ticketHolders[LockModesCount] = {};
-
-void acquireTicket(LockMode mode) {
- auto holder = ticketHolders[mode];
- if (holder) {
- holder->waitForTicket();
- }
-}
-
-void releaseTicket(LockMode* mode) {
- invariant(*mode != MODE_NONE);
- auto holder = ticketHolders[*mode];
- *mode = MODE_NONE;
- if (holder) {
- holder->release();
- }
-}
} // namespace
@@ -290,6 +274,17 @@ LockerImpl<IsForMMAPV1>::~LockerImpl() {
}
template <bool IsForMMAPV1>
+Locker::ClientState LockerImpl<IsForMMAPV1>::getClientState() const {
+ auto state = _clientState.load();
+ if (state == kActiveReader && hasLockPending())
+ state = kQueuedReader;
+ if (state == kActiveWriter && hasLockPending())
+ state = kQueuedWriter;
+
+ return state;
+}
+
+template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
LockResult result = lockGlobalBegin(mode);
if (result == LOCK_WAITING) {
@@ -307,7 +302,13 @@ template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
dassert(isLocked() == (_modeForTicket != MODE_NONE));
if (_modeForTicket == MODE_NONE) {
- acquireTicket(mode);
+ const bool reader = isSharedLockMode(mode);
+ auto holder = ticketHolders[mode];
+ if (holder) {
+ _clientState.store(reader ? kQueuedReader : kQueuedWriter);
+ holder->waitForTicket();
+ }
+ _clientState.store(reader ? kActiveReader : kActiveWriter);
_modeForTicket = mode;
}
const LockResult result = lockBegin(resourceIdGlobal, mode);
@@ -371,6 +372,8 @@ bool LockerImpl<IsForMMAPV1>::unlockAll() {
return false;
}
+ invariant(!inAWriteUnitOfWork());
+
LockRequestsMap::Iterator it = _requests.begin();
while (!it.finished()) {
// If we're here we should only have one reference to any lock. It is a programming
@@ -379,7 +382,7 @@ bool LockerImpl<IsForMMAPV1>::unlockAll() {
if (it.key().getType() == RESOURCE_GLOBAL) {
it.next();
} else {
- invariant(_unlockImpl(it));
+ invariant(_unlockImpl(&it));
}
}
@@ -443,7 +446,12 @@ void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
template <bool IsForMMAPV1>
bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
LockRequestsMap::Iterator it = _requests.find(resId);
- return _unlockImpl(it);
+ if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), (it->mode))) {
+ _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
+ return false;
+ }
+
+ return _unlockImpl(&it);
}
template <bool IsForMMAPV1>
@@ -762,12 +770,7 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
// Cleanup the state, since this is an unused lock now
if (result != LOCK_OK) {
LockRequestsMap::Iterator it = _requests.find(resId);
- if (globalLockManager.unlock(it.objAddr())) {
- if (resId == resourceIdGlobal)
- releaseTicket(&_modeForTicket);
- scoped_spinlock scopedLock(_lock);
- it.remove();
- }
+ _unlockImpl(&it);
}
if (yieldFlushLock) {
@@ -780,18 +783,20 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
}
template <bool IsForMMAPV1>
-bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
- if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
- _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
- return false;
- }
-
- if (globalLockManager.unlock(it.objAddr())) {
- if (it.key() == resourceIdGlobal)
- releaseTicket(&_modeForTicket);
+bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator* it) {
+ if (globalLockManager.unlock(it->objAddr())) {
+ if (it->key() == resourceIdGlobal) {
+ invariant(_modeForTicket != MODE_NONE);
+ auto holder = ticketHolders[_modeForTicket];
+ _modeForTicket = MODE_NONE;
+ if (holder) {
+ holder->release();
+ }
+ _clientState.store(kInactive);
+ }
scoped_spinlock scopedLock(_lock);
- it.remove();
+ it->remove();
return true;
}
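
Worth noting in the lock_state.cpp hunks above: the "delay unlock inside a write unit of work" check moved out of _unlockImpl() and into unlock(), so _unlockImpl() now always frees the lock immediately (and owns returning the ticket and resetting the client state), while unlockAll() asserts it is never reached inside a WUOW. A compressed sketch of that contract, with hypothetical simplified types (ResourceId as a plain int, stubbed helpers):

    #include <queue>

    using ResourceId = int;  // hypothetical stand-in for mongo::ResourceId

    class UnlockSketch {
    public:
        // unlock(): under two-phase locking, locks taken inside a write unit of
        // work must be held until commit, so only remember the resource for now.
        bool unlock(ResourceId resId) {
            if (_wuowNestingLevel > 0 && _shouldDelayUnlock(resId)) {
                _resourcesToUnlockAtEndOfUnitOfWork.push(resId);
                return false;  // still held
            }
            return _unlockImpl(resId);  // freed immediately
        }

        // endWriteUnitOfWork(): once the outermost WUOW ends, drain the queue
        // through the immediate-release path.
        void endWriteUnitOfWork() {
            if (--_wuowNestingLevel > 0)
                return;
            while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
                _unlockImpl(_resourcesToUnlockAtEndOfUnitOfWork.front());
                _resourcesToUnlockAtEndOfUnitOfWork.pop();
            }
        }

    private:
        bool _shouldDelayUnlock(ResourceId) const { return true; }  // stub
        bool _unlockImpl(ResourceId) { return true; }               // stub

        int _wuowNestingLevel = 1;  // pretend we are inside one WUOW
        std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
    };
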
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index de19f38ae44..45433391cea 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -32,6 +32,7 @@
#include "mongo/db/concurrency/fast_map_noalloc.h"
#include "mongo/db/concurrency/locker.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
@@ -92,6 +93,8 @@ public:
virtual ~LockerImpl();
+ virtual ClientState getClientState() const;
+
virtual LockerId getId() const {
return _id;
}
@@ -178,9 +181,9 @@ private:
/**
* The main functionality of the unlock method, except accepts iterator in order to avoid
- * additional lookups during unlockAll.
+     * additional lookups during unlockAll. Frees locks immediately, so it must not be called inside a WUOW.
*/
- bool _unlockImpl(LockRequestsMap::Iterator& it);
+ bool _unlockImpl(LockRequestsMap::Iterator* it);
/**
* MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
@@ -189,7 +192,6 @@ private:
*/
LockMode _getModeForMMAPV1FlushLock() const;
-
// Used to disambiguate different lockers
const LockerId _id;
@@ -217,6 +219,9 @@ private:
// Mode for which the Locker acquired a ticket, or MODE_NONE if no ticket was acquired.
LockMode _modeForTicket = MODE_NONE;
+ // Indicates whether the client is active reader/writer or is queued.
+ AtomicWord<ClientState> _clientState{kInactive};
+
//////////////////////////////////////////////////////////////////////////////////////////
//
// Methods merged from LockState, which should eventually be removed or changed to methods
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index be9f3ea0730..cdc490e1379 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -57,6 +57,17 @@ public:
*/
static void setGlobalThrottling(class TicketHolder* reading, class TicketHolder* writing);
+ /**
+ * State for reporting the number of active and queued reader and writer clients.
+ */
+ enum ClientState { kInactive, kActiveReader, kActiveWriter, kQueuedReader, kQueuedWriter };
+
+ /**
+     * Returns the client's state: inactive, actively holding locks as a reader or writer, or
+     * queued (throttled) while waiting on a lock or a ticket.
+ */
+ virtual ClientState getClientState() const = 0;
+
virtual LockerId getId() const = 0;
/**
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index ad80d2f13ca..cfbd3234c2c 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -41,6 +41,10 @@ class LockerNoop : public Locker {
public:
LockerNoop() {}
+ virtual ClientState getClientState() const {
+ invariant(false);
+ }
+
virtual LockerId getId() const {
invariant(false);
}
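
The reason getClientState() lives on the abstract Locker interface and is backed by an atomic: it is read by threads other than the locker's owner (the serverStatus section below iterates over every Client in the ServiceContext), while LockerNoop, which never takes locks, simply invariants. A toy demonstration of that cross-thread observation, reusing the illustrative LockerSketch and TicketHolderStub defined in the sketch near the top of this page:

    #include <thread>

    int main() {
        TicketHolderStub tickets(0);  // zero tickets: the worker must queue
        LockerSketch locker;

        std::thread worker([&] { locker.acquireTicket(&tickets, /*reader=*/true); });

        // Another thread (e.g. one serving a serverStatus request) can observe
        // the state without coordinating with the owner; the atomic makes it safe.
        while (locker.getClientState() != kQueuedReader) {
            std::this_thread::yield();
        }

        tickets.release();  // hand over a ticket; the worker becomes an active reader
        worker.join();
        return locker.getClientState() == kActiveReader ? 0 : 1;
    }
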
diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp
index e320fe04930..c6cde40d0ad 100644
--- a/src/mongo/db/stats/lock_server_status_section.cpp
+++ b/src/mongo/db/stats/lock_server_status_section.cpp
@@ -28,6 +28,8 @@
#include "mongo/platform/basic.h"
+#include <valarray>
+
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/concurrency/lock_stats.h"
@@ -48,36 +50,18 @@ public:
}
virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
- int numTotal = 0;
- int numWriteLocked = 0;
- int numReadLocked = 0;
- int numWaitingRead = 0;
- int numWaitingWrite = 0;
+ std::valarray<int> clientStatusCounts(5);
// This returns the blocked lock states
for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
Client* client = cursor.next();) {
invariant(client);
- ++numTotal;
stdx::unique_lock<Client> uniqueLock(*client);
const OperationContext* opCtx = client->getOperationContext();
- if (opCtx == NULL)
- continue;
-
- if (opCtx->lockState()->isWriteLocked()) {
- numWriteLocked++;
-
- if (opCtx->lockState()->getWaitingResource().isValid()) {
- numWaitingWrite++;
- }
- } else if (opCtx->lockState()->isReadLocked()) {
- numReadLocked++;
-
- if (opCtx->lockState()->getWaitingResource().isValid()) {
- numWaitingRead++;
- }
- }
+ auto state = opCtx ? opCtx->lockState()->getClientState() : Locker::kInactive;
+            invariant(static_cast<size_t>(state) < clientStatusCounts.size());
+ clientStatusCounts[state]++;
}
// Construct the actual return value out of the mutex
@@ -88,18 +72,20 @@ public:
{
BSONObjBuilder currentQueueBuilder(ret.subobjStart("currentQueue"));
- currentQueueBuilder.append("total", numWaitingRead + numWaitingWrite);
- currentQueueBuilder.append("readers", numWaitingRead);
- currentQueueBuilder.append("writers", numWaitingWrite);
+ currentQueueBuilder.append("total",
+ clientStatusCounts[Locker::kQueuedReader] +
+ clientStatusCounts[Locker::kQueuedWriter]);
+ currentQueueBuilder.append("readers", clientStatusCounts[Locker::kQueuedReader]);
+ currentQueueBuilder.append("writers", clientStatusCounts[Locker::kQueuedWriter]);
currentQueueBuilder.done();
}
{
BSONObjBuilder activeClientsBuilder(ret.subobjStart("activeClients"));
- activeClientsBuilder.append("total", numTotal);
- activeClientsBuilder.append("readers", numReadLocked);
- activeClientsBuilder.append("writers", numWriteLocked);
+ activeClientsBuilder.append("total", clientStatusCounts.sum());
+ activeClientsBuilder.append("readers", clientStatusCounts[Locker::kActiveReader]);
+ activeClientsBuilder.append("writers", clientStatusCounts[Locker::kActiveWriter]);
activeClientsBuilder.done();
}
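
Taken together, the section now just buckets every client by its ClientState and reports the tallies, preserving the existing document shape. Note that activeClients.total is clientStatusCounts.sum(), i.e. every open client including inactive and queued ones, which matches the old numTotal semantics of counting every client the cursor visits. Illustrative output (field names from the builders above; the numbers are made up):

    "currentQueue"  : { "total" : 3,  "readers" : 1, "writers" : 2 },
    "activeClients" : { "total" : 41, "readers" : 5, "writers" : 4 }
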