author      Dianna Hohensee <dianna.hohensee@10gen.com>    2018-03-21 10:12:34 -0400
committer   Dianna Hohensee <dianna.hohensee@10gen.com>    2018-03-22 15:48:54 -0400
commit      379044733ab55adf136e7cbbff40a4b6b3b80931 (patch)
tree        c5b1ddb46194fe8f2ec0d9ba4069467eef9b4dc8 /src/mongo/db/concurrency
parent      b90e530039593357d49f8f5bc7e431a667804000 (diff)
download    mongo-379044733ab55adf136e7cbbff40a4b6b3b80931.tar.gz
SERVER-33316 Remove limit on number of locks a single operation can take
(Replaces the pre-allocated array in FastMapNoAlloc with a dynamic deque)
Diffstat (limited to 'src/mongo/db/concurrency')
-rw-r--r--   src/mongo/db/concurrency/fast_map_noalloc.h         157
-rw-r--r--   src/mongo/db/concurrency/fast_map_noalloc_test.cpp    23
-rw-r--r--   src/mongo/db/concurrency/lock_manager.cpp             10
-rw-r--r--   src/mongo/db/concurrency/lock_manager.h                2
-rw-r--r--   src/mongo/db/concurrency/lock_manager_defs.h           2
-rw-r--r--   src/mongo/db/concurrency/lock_state.cpp                2
-rw-r--r--   src/mongo/db/concurrency/lock_state.h                  5
7 files changed, 100 insertions, 101 deletions
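
The core of the change is easier to read outside the diff. Below is a minimal sketch of the scheme the commit adopts, not code from the tree (the name SmallMapSketch and its methods are invented for this note): entries live in a std::deque and are flagged as in use rather than freed, and the container grows, via emplace_front(), only when every existing slot is occupied, so there is no longer a compile-time PreallocCount ceiling and pointers to stored values stay valid as the map grows.

#include <cassert>
#include <deque>

// Minimal sketch of the deque-backed, in-use-flagging scheme. Not the MongoDB class.
template <class KeyType, class ValueType>
class SmallMapSketch {
    struct Entry {
        bool inUse = false;
        KeyType key;
        ValueType value;
    };

public:
    // Returns a pointer to the stored value. The pointer stays valid for the life of
    // the map: erase() only clears the inUse flag, and growth happens at the front of
    // the deque, which never relocates existing elements.
    ValueType* insert(const KeyType& key) {
        if (_usedSize == _entries.size()) {
            // Every slot is occupied: grow by one. Placing the new slot at the front
            // lets the scan below find a free slot immediately.
            _entries.emplace_front();
        }
        for (auto& entry : _entries) {
            if (!entry.inUse) {
                entry.inUse = true;
                entry.key = key;
                ++_usedSize;
                return &entry.value;
            }
        }
        assert(false);  // Unreachable: a free slot is guaranteed above.
        return nullptr;
    }

    ValueType* find(const KeyType& key) {
        for (auto& entry : _entries) {
            if (entry.inUse && entry.key == key)
                return &entry.value;
        }
        return nullptr;  // Not found.
    }

    void erase(const KeyType& key) {
        for (auto& entry : _entries) {
            if (entry.inUse && entry.key == key) {
                entry.inUse = false;  // Slot memory is kept for reuse.
                --_usedSize;
                return;
            }
        }
    }

private:
    std::deque<Entry> _entries;
    typename std::deque<Entry>::size_type _usedSize = 0;
};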
diff --git a/src/mongo/db/concurrency/fast_map_noalloc.h b/src/mongo/db/concurrency/fast_map_noalloc.h
index b6fdcf0a7a3..5ac690127a2 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc.h
+++ b/src/mongo/db/concurrency/fast_map_noalloc.h
@@ -28,6 +28,8 @@
#pragma once
+#include <deque>
+
#include "mongo/base/static_assert.h"
#include "mongo/util/assert_util.h"
@@ -36,32 +38,52 @@ namespace mongo {
/**
* NOTE: This structure should not be used for anything other than the Lock Manager.
*
- * This is a simple implementation of an unordered associative array with minimal
- * functionality, used by the lock manager. It keeps a small number of memory entries to store
- * values, in order to avoid memory allocations, which dominate the cost of the lock manager
- * calls by a wide margin.
+ * This is a simple implementation of an unordered associative array with minimal functionality,
+ * used by the lock manager. It keeps a small number of memory entries to store values, in order to
+ * avoid memory allocations, which dominate the cost of the lock manager calls by a wide margin.
*
* This class is not thread-safe.
+ *
+ * Note: this custom data structure is necessary because we need: fast memory access; to maintain
+ * all data pointer/reference validity when entries are added/removed; and to avoid costly and
+ * repetitive entry mallocs and frees.
*/
-template <class KeyType, class ValueType, int PreallocCount>
+template <class KeyType, class ValueType>
class FastMapNoAlloc {
-public:
+private:
+ /**
+ * Map entry through which we avoid releasing memory: we mark it as inUse or not.
+ * Maps keys to values.
+ */
+ struct PreallocEntry {
+ bool inUse = false;
+
+ KeyType key;
+ ValueType value;
+ };
+
+ typedef typename std::deque<PreallocEntry> Container;
+
+ typedef typename Container::size_type size_type;
+
+ typedef typename Container::iterator map_iterator;
+
+ typedef typename Container::const_iterator const_map_iterator;
+
+
/**
* Forward-only iterator. Does not synchronize with the underlying collection in any way.
* In other words, do not modify the collection while there is an open iterator on it.
*/
- template <class MapType, class IteratorValueType>
+ template <class MapType, class IteratorValueType, class IteratorType>
class IteratorImpl {
public:
- IteratorImpl(const IteratorImpl& other) : _map(other._map), _idx(other._idx) {}
-
-
//
// Operators
//
- bool operator!() const {
- return finished();
+ operator bool() const {
+ return !finished();
}
IteratorValueType& operator*() const {
@@ -82,7 +104,7 @@ public:
* can be used to determine whether a previous call to find has found something.
*/
bool finished() const {
- return (MONGO_unlikely(_idx == PreallocCount));
+ return (_it == _map._fastAccess.end());
}
/**
@@ -92,7 +114,7 @@ public:
IteratorValueType* objAddr() const {
invariant(!finished());
- return &_map._fastAccess[_idx].value;
+ return &(_it->value);
}
/**
@@ -102,7 +124,7 @@ public:
const KeyType& key() const {
invariant(!finished());
- return _map._fastAccess[_idx].key;
+ return _it->key;
}
/**
@@ -111,9 +133,8 @@ public:
*/
void next() {
invariant(!finished());
-
- while (++_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse) {
+ while (++_it != _map._fastAccess.end()) {
+ if (_it->inUse) {
return;
}
}
@@ -125,9 +146,9 @@ public:
*/
void remove() {
invariant(!finished());
- invariant(_map._fastAccess[_idx].inUse);
+ invariant(_it->inUse);
- _map._fastAccess[_idx].inUse = false;
+ _it->inUse = false;
_map._fastAccessUsedSize--;
next();
@@ -135,26 +156,31 @@ public:
private:
- friend class FastMapNoAlloc<KeyType, ValueType, PreallocCount>;
+ friend class FastMapNoAlloc<KeyType, ValueType>;
// Used for iteration of the complete map
- IteratorImpl(MapType& map) : _map(map), _idx(-1) {
- next();
+ IteratorImpl(MapType& map) : _map(map), _it(map._fastAccess.begin()) {
+ while (_it != _map._fastAccess.end()) {
+ if (_it->inUse) {
+ return;
+ }
+ ++_it;
+ }
}
// Used for iterator starting at a position
- IteratorImpl(MapType& map, int idx) : _map(map), _idx(idx) {
- invariant(_idx >= 0);
+ IteratorImpl(MapType& map, IteratorType it) : _map(map), _it(std::move(it)) {
+ invariant(_it != _map._fastAccess.end());
}
// Used for iteration starting at a particular key
- IteratorImpl(MapType& map, const KeyType& key) : _map(map), _idx(0) {
- while (_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse && (_map._fastAccess[_idx].key == key)) {
+ IteratorImpl(MapType& map, const KeyType& key) : _map(map), _it(_map._fastAccess.begin()) {
+ while (_it != _map._fastAccess.end()) {
+ if (_it->inUse && _it->key == key) {
return;
}
- ++_idx;
+ ++_it;
}
}
@@ -162,41 +188,42 @@ public:
// The map being iterated on
MapType& _map;
- // Index to the current entry being iterated
- int _idx;
+ // Iterator on the map
+ IteratorType _it;
};
+public:
+ typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType>, ValueType, map_iterator> Iterator;
- typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType> Iterator;
-
- typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>, const ValueType>
+ typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType>,
+ const ValueType,
+ const_map_iterator>
ConstIterator;
-
- FastMapNoAlloc() : _fastAccess(), _fastAccessUsedSize(0) {}
+ FastMapNoAlloc() : _fastAccessUsedSize(0) {}
/**
* Inserts the specified entry in the map and returns a reference to the memory for the
* entry just inserted.
*/
Iterator insert(const KeyType& key) {
- uassert(ErrorCodes::TooManyLocks,
- "Operation requires too many locks",
- _fastAccessUsedSize < PreallocCount);
+ if (_fastAccessUsedSize == _fastAccess.size()) {
+ // Place the new entry in the front so the below map iteration is faster.
+ _fastAccess.emplace_front();
+ }
- // Find the first unused slot. This could probably be even further optimized by adding
- // a field pointing to the first unused location.
- int idx = 0;
- for (; _fastAccess[idx].inUse; idx++)
- ;
+ map_iterator it = _fastAccess.begin();
+ while (it != _fastAccess.end() && it->inUse) {
+ ++it;
+ }
- invariant(idx < PreallocCount);
+ invariant(it != _fastAccess.end() && !(it->inUse));
- _fastAccess[idx].inUse = true;
- _fastAccess[idx].key = key;
- _fastAccessUsedSize++;
+ it->inUse = true;
+ it->key = key;
+ ++_fastAccessUsedSize;
- return Iterator(*this, idx);
+ return Iterator(*this, it);
}
/**
@@ -214,7 +241,7 @@ public:
* Returns an iterator pointing to the first position, which has entry with the specified
* key. Before dereferencing the returned iterator, it should be checked for validity using
* the finished() method or the ! operator. If no element was found, finished() will return
- * false.
+ * true.
*
* While it is allowed to call next() on the returned iterator, this is not very useful,
* because the container is not ordered.
@@ -227,7 +254,7 @@ public:
return ConstIterator(*this, key);
}
- int size() const {
+ size_t size() const {
return _fastAccessUsedSize;
}
bool empty() const {
@@ -235,30 +262,12 @@ public:
}
private:
- // Empty and very large maps do not make sense since there will be no performance gain, so
- // disallow them.
- MONGO_STATIC_ASSERT(PreallocCount > 0);
- MONGO_STATIC_ASSERT(PreallocCount < 32);
-
- // Iterator accesses the map directly
- friend class IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType>;
-
- friend class IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- const ValueType>;
-
-
- struct PreallocEntry {
- PreallocEntry() : inUse(false) {}
-
- bool inUse;
-
- KeyType key;
- ValueType value;
- };
+ // We chose a deque data structure because it maintains the validity of existing
+ // pointers/references to its contents when it allocates more memory. Deque also gives us O(1)
+ // emplace_front() in insert().
+ std::deque<PreallocEntry> _fastAccess;
- // Pre-allocated memory for entries
- PreallocEntry _fastAccess[PreallocCount];
- int _fastAccessUsedSize;
+ size_type _fastAccessUsedSize;
};
} // namespace mongo
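
The new comment in fast_map_noalloc.h relies on a property the C++ standard guarantees for std::deque: inserting at either end invalidates iterators but never pointers or references to existing elements (std::vector offers no such guarantee once it reallocates). A small standalone check of that property, not part of the commit:

#include <cassert>
#include <deque>

int main() {
    std::deque<int> d;
    d.push_back(42);
    int* stable = &d.front();  // Pointer into the container.

    // Growing at either end may allocate new blocks, but existing elements are never
    // moved, so 'stable' must remain valid. (Iterators, by contrast, are invalidated.)
    for (int i = 0; i < 10000; ++i) {
        d.emplace_front(i);
        d.emplace_back(i);
    }

    assert(*stable == 42);
    return 0;
}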
diff --git a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
index a75d62f521e..5876869324f 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
+++ b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
@@ -46,7 +46,7 @@ struct TestStruct {
std::string value;
};
-typedef class FastMapNoAlloc<ResourceId, TestStruct, 6> TestFastMapNoAlloc;
+typedef class FastMapNoAlloc<ResourceId, TestStruct> TestFastMapNoAlloc;
TEST(FastMapNoAlloc, Empty) {
@@ -68,15 +68,15 @@ TEST(FastMapNoAlloc, NotEmpty) {
ASSERT(!it.finished());
ASSERT(!!it);
- ASSERT(it->id == 101);
- ASSERT(it->value == "Item101");
+ ASSERT(it->id == 102);
+ ASSERT(it->value == "Item102");
it.next();
ASSERT(!it.finished());
ASSERT(!!it);
- ASSERT(it->id == 102);
- ASSERT(it->value == "Item102");
+ ASSERT(it->id == 101);
+ ASSERT(it->value == "Item101");
// We are at the last element
it.next();
@@ -84,19 +84,6 @@ TEST(FastMapNoAlloc, NotEmpty) {
ASSERT(!it);
}
-TEST(FastMapNoAlloc, ExceedCapacity) {
- TestFastMapNoAlloc map;
-
- for (int i = 0; i < 6; i++) {
- map.insert(ResourceId(RESOURCE_COLLECTION, i))
- ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
- }
-
- ASSERT_THROWS_CODE(map.insert(ResourceId(RESOURCE_COLLECTION, 6)),
- AssertionException,
- ErrorCodes::TooManyLocks);
-}
-
TEST(FastMapNoAlloc, FindNonExisting) {
TestFastMapNoAlloc map;
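
With ExceedCapacity gone, a test in the same style would exercise what the deque-backed map now permits. The following is hypothetical and not part of this commit; it reuses TestFastMapNoAlloc, TestStruct::initNew, and the ResourceId fixture already present in fast_map_noalloc_test.cpp:

TEST(FastMapNoAlloc, GrowsPastOldFixedCapacity) {
    TestFastMapNoAlloc map;

    // The removed template parameter capped this test's map at 6 entries (and the
    // Locker's map at 16); the deque-backed map simply grows as needed.
    for (int i = 0; i < 64; i++) {
        map.insert(ResourceId(RESOURCE_COLLECTION, i))
            ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
    }

    ASSERT_EQUALS(64U, map.size());

    for (int i = 0; i < 64; i++) {
        ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, i)).finished());
    }
}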
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index cb8765a8ef7..aaffd7129df 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -188,7 +188,7 @@ struct LockHead {
}
/**
- * Finish creation of request and put it on the lockhead's conflict or granted queues. Returns
+ * Finish creation of request and put it on the LockHead's conflict or granted queues. Returns
* LOCK_WAITING for conflict case and LOCK_OK otherwise.
*/
LockResult newRequest(LockRequest* request) {
@@ -282,7 +282,7 @@ struct LockHead {
// the end of the queue. Conversion requests are granted from the beginning forward.
LockRequestList grantedList;
- // Counts the grants and coversion counts for each of the supported lock modes. These
+ // Counts the grants and conversion counts for each of the supported lock modes. These
// counts should exactly match the aggregated modes on the granted list.
uint32_t grantedCounts[LockModesCount];
@@ -297,7 +297,7 @@ struct LockHead {
// Doubly-linked list of requests, which have not been granted yet because they conflict
// with the set of granted modes. Requests are queued at the end of the queue and are
// granted from the beginning forward, which gives these locks FIFO ordering. Exceptions
- // are high-priorty locks, such as the MMAP V1 flush lock.
+ // are high-priority locks, such as the MMAP V1 flush lock.
LockRequestList conflictList;
// Counts the conflicting requests for each of the lock modes. These counts should exactly
@@ -338,7 +338,7 @@ struct LockHead {
* of resourceId to PartitionedLockHead.
*
* As long as all lock requests for a resource have an intent mode, as opposed to a conflicting
- * mode, its LockHead may reference ParitionedLockHeads. A partitioned LockHead will not have
+ * mode, its LockHead may reference PartitionedLockHeads. A partitioned LockHead will not have
* any conflicts. The total set of granted requests (with intent mode) is the union of
* its grantedList and all grantedLists in PartitionedLockHeads.
*
@@ -397,7 +397,7 @@ void LockHead::migratePartitionedLockHeads() {
partition->data.erase(it);
delete partitionedLock;
}
- // Don't pop-back to early as otherwise the lock will be considered not partioned in
+ // Don't pop-back to early as otherwise the lock will be considered not partitioned in
// newRequest().
partitions.pop_back();
}
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index ae64a9c3398..2956232f996 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -77,7 +77,7 @@ public:
* lock becomes granted, the notification will not be invoked.
*
* If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notfy method has been invoked or unlock has
+ * live at least until the notify method has been invoked or unlock has
* been called for the resource it was assigned to. Failure to do so will
* cause the lock manager to call into an invalid memory location.
* @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 6edb5832fbd..ae8ed410c94 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -128,7 +128,7 @@ enum LockResult {
LOCK_DEADLOCK,
/**
- * This is used as an initialiser value. Should never be returned.
+ * This is used as an initializer value. Should never be returned.
*/
LOCK_INVALID
};
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index eaedc2f3f3f..79c40f1e0e5 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -998,7 +998,7 @@ namespace {
/**
* Periodically purges unused lock buckets. The first time the lock is used again after
* cleanup it needs to be allocated, and similarly, every first use by a client for an intent
- * mode may need to create a partitioned lock head. Cleanup is done roughtly once a minute.
+ * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute.
*/
class UnusedLockCleaner : PeriodicTask {
public:
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 1083d4c25ca..02d5d58e3f9 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -232,7 +232,7 @@ public:
private:
friend class AutoYieldFlushLockForMMAPV1Commit;
- typedef FastMapNoAlloc<ResourceId, LockRequest, 16> LockRequestsMap;
+ typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
/**
* Like lockGlobalBegin, but accepts a deadline for acquiring a ticket.
@@ -281,6 +281,9 @@ private:
//
// This has to be locked inside const methods, hence the mutable.
mutable SpinLock _lock;
+ // Note: this data structure must always guarantee the continued validity of pointers/references
+ // to its contents (LockRequests). The LockManager maintains a LockRequestList of pointers to
+ // the LockRequests managed by this data structure.
LockRequestsMap _requests;
// Reuse the notification object across requests so we don't have to create a new mutex
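
The comment added above _requests is the reason the replacement container had to preserve element addresses: the LockManager side keeps raw pointers to LockRequests that live inside this map. A schematic illustration of that ownership shape, with hypothetical names (Request, owner, and registry are stand-ins, not the actual LockRequest/LockRequestList types):

#include <deque>
#include <vector>

struct Request {
    bool inUse;
    int resource;
};

int main() {
    std::deque<Request> owner;       // Stores the requests themselves (the map's role).
    std::vector<Request*> registry;  // Holds only pointers into 'owner' (the manager's role).

    owner.push_back(Request{true, 1});
    registry.push_back(&owner.back());

    // More requests arrive. Because 'owner' grows only at its ends, the pointer
    // recorded in 'registry' keeps pointing at the same, unmoved element.
    for (int r = 2; r <= 100; ++r) {
        owner.push_front(Request{true, r});
    }

    return registry.front()->resource == 1 ? 0 : 1;
}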