summaryrefslogtreecommitdiff
path: root/slabs.c
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2015-01-03 23:34:02 -0800
committerdormando <dormando@rydia.net>2015-01-03 23:34:02 -0800
commitfb2698975689c87db8be170630cced57798d474f (patch)
tree25ab24d70d1cfba88e30da8d4f8675ea843f15ff /slabs.c
parent9bce42f27c88a88f94c5e5345ced205a0ab36e89 (diff)
downloadmemcached-fb2698975689c87db8be170630cced57798d474f.tar.gz
first pass at LRU maintainer thread
The basics work, but tests still do not pass. A background thread wakes up once per second, or when signaled. It is signaled if a slab class gets an allocation request and has fewer than N chunks free. The background thread shuffles LRUs: HOT, WARM, COLD. HOT is where new items exist. HOT and WARM flow into COLD. Active items in COLD flow back to WARM. Evictions are pulled from COLD. item_updates no longer do anything (and need to be fixed to tick it->time). Items are reshuffled within or around LRUs as they reach the bottom. Ratios of HOT/WARM memory are hardcoded, as are the low/high watermarks. The thread is not fast enough right now; sets cannot block on it.
Diffstat (limited to 'slabs.c')
-rw-r--r--slabs.c31
1 file changed, 29 insertions, 2 deletions
diff --git a/slabs.c b/slabs.c
index 56d9c2b..d762af2 100644
--- a/slabs.c
+++ b/slabs.c
@@ -42,6 +42,9 @@ typedef struct {
static slabclass_t slabclass[MAX_NUMBER_OF_SLAB_CLASSES];
static size_t mem_limit = 0;
static size_t mem_malloced = 0;
+/* If the memory limit has been hit once. Used as a hint to decide when to
+ * early-wake the LRU maintenance thread */
+static bool mem_limit_reached = false;
static int power_largest;
static void *mem_base = NULL;
@@ -198,8 +201,13 @@ static int do_slabs_newslab(const unsigned int id) {
: p->size * p->perslab;
char *ptr;
- if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0) ||
- (grow_slab_list(id) == 0) ||
+ if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0)) {
+ mem_limit_reached = true;
+ MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
+ return 0;
+ }
+
+ if ((grow_slab_list(id) == 0) ||
((ptr = memory_allocate((size_t)len)) == 0)) {
MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
@@ -251,6 +259,11 @@ static void *do_slabs_alloc(const size_t size, unsigned int id) {
MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
}
+ /* FIXME: needs to be a per-slab watermark. */
+ if (mem_limit_reached && p->sl_curr < 50) {
+ lru_maintainer_wake(id);
+ }
+
return ret;
}
@@ -432,6 +445,20 @@ void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal)
pthread_mutex_unlock(&slabs_lock);
}
+unsigned int slabs_available_chunks(const unsigned int id, bool *mem_flag,
+ unsigned int *total_chunks) {
+ unsigned int ret;
+ slabclass_t *p;
+
+ pthread_mutex_lock(&slabs_lock);
+ p = &slabclass[id];
+ ret = p->sl_curr;
+ *mem_flag = mem_limit_reached;
+ *total_chunks = p->slabs * p->perslab;
+ pthread_mutex_unlock(&slabs_lock);
+ return ret;
+}
+
static pthread_cond_t slab_rebalance_cond = PTHREAD_COND_INITIALIZER;
static volatile int do_run_slab_thread = 1;
static volatile int do_run_slab_rebalance_thread = 1;