author    Geert Bosch <geert@mongodb.com>  2016-02-01 10:01:27 -0500
committer Geert Bosch <geert@mongodb.com>  2016-02-01 14:47:46 -0500
commit    2969c83e3dcd4ca26e81b27454938e1c2aa7fe53 (patch)
tree      8501a580f904e5083f7fd774198106117cac1189 /src/mongo/db/concurrency
parent    6df889d5b5e432b1829947f99e044aa52ba93f91 (diff)
download  mongo-2969c83e3dcd4ca26e81b27454938e1c2aa7fe53.tar.gz
SERVER-22011: Obtain tickets at outermost global lock in WT
Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--  src/mongo/db/concurrency/lock_state.cpp  46
-rw-r--r--  src/mongo/db/concurrency/lock_state.h      2
-rw-r--r--  src/mongo/db/concurrency/locker.h           9
3 files changed, 57 insertions, 0 deletions
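
The change throttles storage-engine concurrency by making each outermost global lock acquisition pass through a TicketHolder. The real class lives in mongo/util/concurrency/ticketholder.h (included by the first hunk below); the stand-in that follows is only a minimal sketch, assuming plain counting-semaphore semantics, of what the two calls this diff relies on, waitForTicket() and release(), are expected to do.

#include <condition_variable>
#include <mutex>

// Minimal counting-semaphore stand-in for the TicketHolder interface used by this
// commit; illustrative only, not MongoDB's implementation.
class SimpleTicketHolder {
public:
    explicit SimpleTicketHolder(int numTickets) : _available(numTickets) {}

    // Block until a ticket is free, then take it.
    void waitForTicket() {
        std::unique_lock<std::mutex> lk(_mutex);
        _cv.wait(lk, [this] { return _available > 0; });
        --_available;
    }

    // Hand a ticket back and wake one waiter.
    void release() {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            ++_available;
        }
        _cv.notify_one();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    int _available;
};
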
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index ea2b9b9c63a..04512bce9bd 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -39,6 +39,7 @@
#include "mongo/platform/compiler.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/synchronization.h"
+#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -188,6 +189,7 @@ void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
invariant(!inAWriteUnitOfWork());
invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
invariant(_requests.empty());
+ invariant(_modeForTicket == MODE_NONE);
// Reset the locking statistics so the object can be reused
_stats.reset();
@@ -243,11 +245,38 @@ void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
_cond.notify_all();
}
+namespace {
+TicketHolder* ticketHolders[LockModesCount] = {};
+
+void acquireTicket(LockMode mode) {
+ auto holder = ticketHolders[mode];
+ if (holder) {
+ holder->waitForTicket();
+ }
+}
+
+void releaseTicket(LockMode* mode) {
+ invariant(*mode != MODE_NONE);
+ auto holder = ticketHolders[*mode];
+ *mode = MODE_NONE;
+ if (holder) {
+ holder->release();
+ }
+}
+} // namespace
+
//
// Locker
//
+/* static */
+void Locker::setGlobalThrottling(class TicketHolder* reading, class TicketHolder* writing) {
+ ticketHolders[MODE_S] = reading;
+ ticketHolders[MODE_IS] = reading;
+ ticketHolders[MODE_IX] = writing;
+}
+
template <bool IsForMMAPV1>
LockerImpl<IsForMMAPV1>::LockerImpl()
: _id(idCounter.addAndFetch(1)), _wuowNestingLevel(0), _batchWriter(false) {}
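
The helpers above pair one waitForTicket() with one release() per outermost global lock, with _modeForTicket recording which pool the ticket came from. A toy sketch of that bookkeeping, using the SimpleTicketHolder stand-in from the top of this page (not MongoDB's classes) together with the real LockMode enum:

#include "mongo/db/concurrency/lock_manager_defs.h"

// Toy illustration only: mirrors the acquireTicket()/releaseTicket() bookkeeping.
void toyGlobalLockScope(SimpleTicketHolder* writing) {
    mongo::LockMode modeForTicket = mongo::MODE_NONE;

    // Outermost acquisition: take a ticket and remember which mode (pool) it is for.
    writing->waitForTicket();
    modeForTicket = mongo::MODE_IX;

    // ... global lock held, work happens here ...

    // Final release: reset the recorded mode, then hand the ticket back exactly once.
    if (modeForTicket != mongo::MODE_NONE) {
        modeForTicket = mongo::MODE_NONE;
        writing->release();
    }
}
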
@@ -276,6 +305,11 @@ LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs
template <bool IsForMMAPV1>
LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
+ dassert(isLocked() == (_modeForTicket != MODE_NONE));
+ if (_modeForTicket == MODE_NONE) {
+ acquireTicket(mode);
+ _modeForTicket = mode;
+ }
const LockResult result = lockBegin(resourceIdGlobal, mode);
if (result == LOCK_OK)
return LOCK_OK;
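
The _modeForTicket guard above means only the outermost global lock acquisition consumes a ticket; recursive acquisitions skip acquireTicket() entirely. A hedged illustration of that call pattern, assuming lockGlobal()'s default timeout; the matching global unlocks are not shown, and the last of them returns the ticket through _unlockImpl():

// Illustration only: nested global locks on one Locker consume a single ticket.
void nestedGlobalLocks(mongo::Locker* locker) {
    locker->lockGlobal(mongo::MODE_IX);  // outermost: waits for a 'writing' ticket
    locker->lockGlobal(mongo::MODE_IX);  // recursive: _modeForTicket is already set,
                                         // so no second ticket is requested
    // ... matching unlocks of the global resource (not shown); the final one
    // releases the ticket ...
}
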
@@ -315,6 +349,10 @@ void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
invariant(globalLockRequest->mode == MODE_X);
invariant(globalLockRequest->recursiveCount == 1);
+ invariant(_modeForTicket == MODE_X);
+ // Note that this locker will not actually have a ticket (as MODE_X has no TicketHolder) or
+ // acquire one now, but at most a single thread can be in this downgraded MODE_S situation,
+ // so it's OK.
// Making this call here will record lock downgrades as acquisitions, which is acceptable
globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
@@ -561,6 +599,7 @@ bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* state
invariant(unlock(resId));
}
+ invariant(!isLocked());
// Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
std::sort(stateOut->locks.begin(), stateOut->locks.end());
@@ -572,6 +611,7 @@ template <bool IsForMMAPV1>
void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
// We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
invariant(!inAWriteUnitOfWork());
+ invariant(_modeForTicket == MODE_NONE);
std::vector<OneLock>::const_iterator it = state.locks.begin();
// If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
@@ -590,6 +630,7 @@ void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state
invariant(LOCK_OK == lock(it->resourceId, it->mode));
}
}
+ invariant(_modeForTicket != MODE_NONE);
}
template <bool IsForMMAPV1>
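
The two new invariants bracket yielding: saveLockStateAndUnlock() must end with no locks held (and, via the final global unlock, no ticket), while restoreLockState() re-enters through lockGlobal() and so must end with _modeForTicket set again. A sketch of the yield pattern those invariants protect, using only the signatures visible in this diff:

// Sketch: the ticket is given up for the duration of the yield and a fresh one is
// acquired when the saved locks are restored.
void yieldAndRestore(mongo::Locker* locker) {
    mongo::Locker::LockSnapshot snapshot;
    if (locker->saveLockStateAndUnlock(&snapshot)) {
        // No locks and no ticket are held here; other operations may run.
        locker->restoreLockState(snapshot);
    }
}
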
@@ -722,6 +763,8 @@ LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
if (result != LOCK_OK) {
LockRequestsMap::Iterator it = _requests.find(resId);
if (globalLockManager.unlock(it.objAddr())) {
+ if (resId == resourceIdGlobal)
+ releaseTicket(&_modeForTicket);
scoped_spinlock scopedLock(_lock);
it.remove();
}
@@ -744,6 +787,9 @@ bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
}
if (globalLockManager.unlock(it.objAddr())) {
+ if (it.key() == resourceIdGlobal)
+ releaseTicket(&_modeForTicket);
+
scoped_spinlock scopedLock(_lock);
it.remove();
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index cf634fd3168..de19f38ae44 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -214,6 +214,8 @@ private:
int _wuowNestingLevel;
std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
+ // Mode for which the Locker acquired a ticket, or MODE_NONE if no ticket was acquired.
+ LockMode _modeForTicket = MODE_NONE;
//////////////////////////////////////////////////////////////////////////////////////////
//
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 7c19f421e5c..be9f3ea0730 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -48,6 +48,15 @@ class Locker {
public:
virtual ~Locker() {}
+ /**
+ * Require global lock attempts to obtain tickets from 'reading' (for MODE_S and MODE_IS),
+ * and from 'writing' (for MODE_IX), which must have static lifetimes. There is no throttling
+ * for MODE_X, as there can only ever be a single locker using this mode. The throttling is
+ * intended to defend against large drops in throughput under high load due to too much
+ * concurrency.
+ */
+ static void setGlobalThrottling(class TicketHolder* reading, class TicketHolder* writing);
+
virtual LockerId getId() const = 0;
/**
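
A hypothetical wiring of the API declared above, as server initialization might perform it. The holder names, the ticket counts, and the TicketHolder(int) constructor are assumptions for illustration; the documented contract is only that both holders have static lifetime and that MODE_X remains unthrottled.

#include "mongo/db/concurrency/locker.h"
#include "mongo/util/concurrency/ticketholder.h"

// Illustrative only: names and pool sizes are hypothetical.
static mongo::TicketHolder globalReadTickets(128);
static mongo::TicketHolder globalWriteTickets(128);

void configureGlobalLockThrottling() {
    // MODE_S/MODE_IS draw from the read pool, MODE_IX from the write pool.
    mongo::Locker::setGlobalThrottling(&globalReadTickets, &globalWriteTickets);
}
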