author     dormando <dormando@rydia.net>    2015-10-09 20:59:04 -0700
committer  dormando <dormando@rydia.net>    2015-11-18 23:14:36 -0800
commit     b1debc4c96134e9014a929c32d6990cb5d66a22c
tree       47ef4449d33be9603f3dc9c1c041b7688b784b92
parent     8fa54f7e43e0ff3ea0f555cb52fd2dda01db3efa
download   memcached-b1debc4c96134e9014a929c32d6990cb5d66a22c.tar.gz
try harder to save items
Previously the slab mover would evict items if the new chunk was within the slab page being moved. Now it does an inline reclaim of the chunk and retries until it runs out of memory.
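In outline, the mover's allocation step now loops over the class freelist instead of bailing when the chunk it pops happens to sit inside the page being cleared. A minimal sketch of the idea; freelist_pop, mark_reclaimed, chunks_per_page, and slab_start/slab_end are hypothetical stand-ins, not memcached's names (slabs.c spells these do_slabs_alloc, ITEM_SLABBED, perslab, and the slab_rebal fields):

    #include <stddef.h>

    /* Hypothetical stand-ins for memcached internals, declared extern
     * just to make the control flow concrete. */
    extern void *freelist_pop(size_t size);   /* pop a free chunk; never grabs a new page */
    extern void  mark_reclaimed(void *chunk); /* record the chunk as already freed */
    extern void *slab_start, *slab_end;       /* bounds of the page being moved */
    extern int   chunks_per_page;

    /* Try up to one page's worth of freelist chunks. Chunks inside the
     * page being cleared are reclaimed inline (they are already off the
     * freelist) and the loop keeps looking for usable memory. */
    static void *rebalance_alloc(size_t size) {
        for (int x = 0; x < chunks_per_page; x++) {
            void *chunk = freelist_pop(size);
            if (chunk == NULL)
                return NULL;      /* freelist exhausted: caller must evict */
            if ((char *)chunk < (char *)slab_start
                || (char *)chunk >= (char *)slab_end)
                return chunk;     /* chunk is outside the page: use it */
            mark_reclaimed(chunk);/* inside the page: reclaim and retry */
        }
        return NULL;
    }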
-rw-r--r--  doc/protocol.txt  |  8
-rw-r--r--  items.c           |  2
-rw-r--r--  memcached.c       |  2
-rw-r--r--  memcached.h       |  4
-rw-r--r--  slabs.c           | 74
-rw-r--r--  slabs.h           |  3
6 files changed, 57 insertions(+), 36 deletions(-)
diff --git a/doc/protocol.txt b/doc/protocol.txt
index 7ebfec6..9d086c4 100644
--- a/doc/protocol.txt
+++ b/doc/protocol.txt
@@ -607,10 +607,10 @@ integers separated by a colon (treat this as a floating point number).
| slab_reassign_evictions_nomem |
| | 64u | Valid items evicted during a page move |
| | | (due to no free memory in slab) |
-| slab_reassign_evictions_samepage |
-| | 64u | Valid items evicted during a page move |
-| | | (due to free memory being in the same |
-| | | page as the source item) |
+| slab_reassign_inline_reclaim |
+| | 64u | Internal stat counter for when the page |
+| | | mover clears memory from the chunk |
+| | | freelist when it wasn't expecting to. |
| slab_reassign_busy_items |
| | 64u | Items busy during page move, requiring a |
| | | retry before page can be moved. |
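Once renamed, the counter appears in `stats` output under the new key. A hypothetical excerpt (values illustrative, unrelated lines elided):

    stats
    ...
    STAT slab_reassign_rescues 0
    STAT slab_reassign_evictions_nomem 0
    STAT slab_reassign_inline_reclaim 0
    STAT slab_reassign_busy_items 0
    ...
    END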
diff --git a/items.c b/items.c
index 685b06b..cfecf8e 100644
--- a/items.c
+++ b/items.c
@@ -179,7 +179,7 @@ item *do_item_alloc(char *key, const size_t nkey, const int flags,
if (!settings.lru_maintainer_thread) {
lru_pull_tail(id, COLD_LRU, 0, false, cur_hv);
}
- it = slabs_alloc(ntotal, id, &total_chunks);
+ it = slabs_alloc(ntotal, id, &total_chunks, 0);
if (settings.expirezero_does_not_evict)
total_chunks -= noexp_lru_size(id);
if (it == NULL) {
diff --git a/memcached.c b/memcached.c
index 2f61acd..ff1af50 100644
--- a/memcached.c
+++ b/memcached.c
@@ -2633,7 +2633,7 @@ static void server_stats(ADD_STAT add_stats, conn *c) {
if (settings.slab_reassign) {
APPEND_STAT("slab_reassign_rescues", "%llu", stats.slab_reassign_rescues);
APPEND_STAT("slab_reassign_evictions_nomem", "%llu", stats.slab_reassign_evictions_nomem);
- APPEND_STAT("slab_reassign_evictions_samepage", "%llu", stats.slab_reassign_evictions_samepage);
+ APPEND_STAT("slab_reassign_inline_reclaim", "%llu", stats.slab_reassign_inline_reclaim);
APPEND_STAT("slab_reassign_busy_items", "%llu", stats.slab_reassign_busy_items);
APPEND_STAT("slab_reassign_running", "%u", stats.slab_reassign_running);
APPEND_STAT("slabs_moved", "%llu", stats.slabs_moved);
diff --git a/memcached.h b/memcached.h
index 489b59e..df972f5 100644
--- a/memcached.h
+++ b/memcached.h
@@ -288,7 +288,7 @@ struct stats {
uint64_t slabs_moved; /* times slabs were moved around */
uint64_t slab_reassign_rescues; /* items rescued during slab move */
uint64_t slab_reassign_evictions_nomem; /* valid items lost during slab move */
- uint64_t slab_reassign_evictions_samepage; /* valid items lost during slab move */
+ uint64_t slab_reassign_inline_reclaim; /* valid items lost during slab move */
uint64_t slab_reassign_busy_items; /* valid temporarily unmovable */
uint64_t lru_crawler_starts; /* Number of item crawlers kicked off */
bool lru_crawler_running; /* crawl in progress */
@@ -533,7 +533,7 @@ struct slab_rebalance {
uint32_t busy_items;
uint32_t rescues;
uint32_t evictions_nomem;
- uint32_t evictions_samepage;
+ uint32_t inline_reclaim;
uint8_t done;
};
diff --git a/slabs.c b/slabs.c
index 8a4f353..770480e 100644
--- a/slabs.c
+++ b/slabs.c
@@ -239,7 +239,8 @@ static int do_slabs_newslab(const unsigned int id) {
}
/*@null@*/
-static void *do_slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks) {
+static void *do_slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks,
+ unsigned int flags) {
slabclass_t *p;
void *ret = NULL;
item *it = NULL;
@@ -256,10 +257,11 @@ static void *do_slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks) {
}
/* fail unless we have space at the end of a recently allocated page,
we have something on our freelist, or we could allocate a new page */
- if (! (p->sl_curr != 0 || do_slabs_newslab(id) != 0)) {
- /* We don't have more memory available */
- ret = NULL;
- } else if (p->sl_curr != 0) {
+ if (p->sl_curr == 0 && flags != SLABS_ALLOC_NO_NEWPAGE) {
+ do_slabs_newslab(id);
+ }
+
+ if (p->sl_curr != 0) {
/* return off our freelist */
it = (item *)p->slots;
p->slots = it->next;
@@ -270,6 +272,8 @@ static void *do_slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks) {
it->refcount = 1;
p->sl_curr--;
ret = (void *)it;
+ } else {
+ ret = NULL;
}
if (ret) {
@@ -430,11 +434,12 @@ static void *memory_allocate(size_t size) {
return ret;
}
-void *slabs_alloc(size_t size, unsigned int id, unsigned int *total_chunks) {
+void *slabs_alloc(size_t size, unsigned int id, unsigned int *total_chunks,
+ unsigned int flags) {
void *ret;
pthread_mutex_lock(&slabs_lock);
- ret = do_slabs_alloc(size, id, total_chunks);
+ ret = do_slabs_alloc(size, id, total_chunks, flags);
pthread_mutex_unlock(&slabs_lock);
return ret;
}
@@ -542,6 +547,36 @@ static int slab_rebalance_start(void) {
return 0;
}
+/* CALLED WITH slabs_lock HELD */
+static void *slab_rebalance_alloc(const size_t size, unsigned int id) {
+ slabclass_t *s_cls;
+ s_cls = &slabclass[slab_rebal.s_clsid];
+ int x;
+ item *new_it = NULL;
+
+ for (x = 0; x < s_cls->perslab; x++) {
+ new_it = do_slabs_alloc(size, id, NULL, SLABS_ALLOC_NO_NEWPAGE);
+ /* check that memory isn't within the range to clear */
+ if (new_it == NULL) {
+ break;
+ }
+ if ((void *)new_it >= slab_rebal.slab_start
+ && (void *)new_it < slab_rebal.slab_end) {
+ /* Pulled something we intend to free. Mark it as freed since
+ * we've already done the work of unlinking it from the freelist.
+ */
+ s_cls->requested -= size;
+ new_it->refcount = 0;
+ new_it->it_flags = ITEM_SLABBED|ITEM_FETCHED;
+ new_it = NULL;
+ slab_rebal.inline_reclaim++;
+ } else {
+ break;
+ }
+ }
+ return new_it;
+}
+
enum move_status {
MOVE_PASS=0, MOVE_FROM_SLAB, MOVE_FROM_LRU, MOVE_BUSY, MOVE_LOCKED
};
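The "CALLED WITH slabs_lock HELD" note is why the helper goes through do_slabs_alloc() rather than the slabs_alloc() wrapper: the wrapper takes slabs_lock itself, and the mover already owns it, so going through the wrapper would deadlock on the same mutex. A rough caller outline (not the literal slab_rebalance_move() code):

    /* slab_rebalance_move() already holds slabs_lock at this point. */
    pthread_mutex_lock(&slabs_lock);
    new_it = slab_rebalance_alloc(ntotal, slab_rebal.s_clsid);
    /* ... rescue the source item into new_it, or evict if NULL ... */
    pthread_mutex_unlock(&slabs_lock);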
@@ -657,26 +692,11 @@ static int slab_rebalance_move(void) {
* WARM LRU?
*/
save_item = 0;
- } else if (s_cls->sl_curr < 1) {
+ } else if ((new_it = slab_rebalance_alloc(ntotal, slab_rebal.s_clsid)) == NULL) {
save_item = 0;
slab_rebal.evictions_nomem++;
} else {
save_item = 1;
- /* BIT OF A HACK: if sl_curr is > 0 alloc won't try to
- * pull from global pool to satisfy the request.
- * FIXME: pile on more flags?
- */
- new_it = do_slabs_alloc(ntotal, slab_rebal.s_clsid, NULL);
- /* check that memory isn't within the range to clear */
- if ((void *)new_it >= slab_rebal.slab_start
- && (void *)new_it < slab_rebal.slab_end) {
- /* Pulled something we intend to free. Put it back
- * and use the main loop to kill it.
- */
- do_slabs_free(new_it, ntotal, slab_rebal.s_clsid);
- save_item = 0;
- slab_rebal.evictions_samepage++;
- }
}
pthread_mutex_unlock(&slabs_lock);
if (save_item) {
@@ -744,7 +764,7 @@ static void slab_rebalance_finish(void) {
int x;
uint32_t rescues;
uint32_t evictions_nomem;
- uint32_t evictions_samepage;
+ uint32_t inline_reclaim;
pthread_mutex_lock(&slabs_lock);
@@ -789,10 +809,10 @@ static void slab_rebalance_finish(void) {
slab_rebal.slab_end = NULL;
slab_rebal.slab_pos = NULL;
evictions_nomem = slab_rebal.evictions_nomem;
- evictions_samepage = slab_rebal.evictions_samepage;
+ inline_reclaim = slab_rebal.inline_reclaim;
rescues = slab_rebal.rescues;
slab_rebal.evictions_nomem = 0;
- slab_rebal.evictions_samepage = 0;
+ slab_rebal.inline_reclaim = 0;
slab_rebal.rescues = 0;
slab_rebalance_signal = 0;
@@ -804,7 +824,7 @@ static void slab_rebalance_finish(void) {
stats.slabs_moved++;
stats.slab_reassign_rescues += rescues;
stats.slab_reassign_evictions_nomem += evictions_nomem;
- stats.slab_reassign_evictions_samepage += evictions_samepage;
+ stats.slab_reassign_inline_reclaim += inline_reclaim;
STATS_UNLOCK();
if (settings.verbose > 1) {
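Taken together with the memcached.h hunks, the accounting is two-level: the mover bumps a per-run counter in slab_rebal under slabs_lock, and slab_rebalance_finish() folds it into the global stats struct under the stats lock. Condensed from the hunks above (STATS_LOCK() is the counterpart of the STATS_UNLOCK() shown):

    /* in slab_rebalance_alloc(), slabs_lock held */
    slab_rebal.inline_reclaim++;

    /* in slab_rebalance_finish() */
    inline_reclaim = slab_rebal.inline_reclaim;
    slab_rebal.inline_reclaim = 0;
    STATS_LOCK();
    stats.slab_reassign_inline_reclaim += inline_reclaim;
    STATS_UNLOCK();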
diff --git a/slabs.h b/slabs.h
index fb29cfa..b993b5f 100644
--- a/slabs.h
+++ b/slabs.h
@@ -19,7 +19,8 @@ void slabs_init(const size_t limit, const double factor, const bool prealloc);
unsigned int slabs_clsid(const size_t size);
/** Allocate object of given length. 0 on error */ /*@null@*/
-void *slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks);
+#define SLABS_ALLOC_NO_NEWPAGE 1
+void *slabs_alloc(const size_t size, unsigned int id, unsigned int *total_chunks, unsigned int flags);
/** Free previously allocated object */
void slabs_free(void *ptr, size_t size, unsigned int id);
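For reference, the two call shapes after this change, condensed from the items.c and slabs.c hunks above:

    /* items.c: default behavior; may pull a fresh page via do_slabs_newslab() */
    it = slabs_alloc(ntotal, id, &total_chunks, 0);

    /* slabs.c, slab_rebalance_alloc() (slabs_lock already held): freelist
     * only; an empty freelist returns NULL instead of allocating a page */
    new_it = do_slabs_alloc(size, id, NULL, SLABS_ALLOC_NO_NEWPAGE);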