summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2015-09-28 17:45:02 -0700
committerdormando <dormando@rydia.net>2015-11-18 23:14:35 -0800
commitd5185f9c25e346417d0de1c8d704d945d76ea474 (patch)
tree96b75e6d304097452f947f3a26bbe66e1b683d69
parent004e221190cd5521593db5f462fd51f998a3265f (diff)
downloadmemcached-d5185f9c25e346417d0de1c8d704d945d76ea474.tar.gz
properly shuffle page list after slab move
Previously this took the newest page of the page list and replaced the oldest page with it, so only the first page moved from a slab class was actually "old". Instead, spend the slight extra CPU to shuffle all of the pointers down one. Now we always chew the oldest page.
-rw-r--r--slabs.c20
1 file changed, 12 insertions, 8 deletions
diff --git a/slabs.c b/slabs.c
index 4bc064c..3518098 100644
--- a/slabs.c
+++ b/slabs.c
@@ -35,7 +35,6 @@ typedef struct {
void **slab_list; /* array of slab pointers */
unsigned int list_size; /* size of prev array */
- unsigned int killing; /* index+1 of dying slab, or zero if none */
size_t requested; /* The number of requested bytes */
} slabclass_t;
@@ -496,9 +495,10 @@ static int slab_rebalance_start(void) {
return no_go; /* Should use a wrapper function... */
}
- s_cls->killing = 1;
-
- slab_rebal.slab_start = s_cls->slab_list[s_cls->killing - 1];
+ /* Always kill the first available slab page as it is most likely to
+ * contain the oldest items
+ */
+ slab_rebal.slab_start = s_cls->slab_list[0];
slab_rebal.slab_end = (char *)slab_rebal.slab_start +
(s_cls->size * s_cls->perslab);
slab_rebal.slab_pos = slab_rebal.slab_start;
@@ -715,17 +715,21 @@ static int slab_rebalance_move(void) {
static void slab_rebalance_finish(void) {
slabclass_t *s_cls;
slabclass_t *d_cls;
+ int x;
pthread_mutex_lock(&slabs_lock);
s_cls = &slabclass[slab_rebal.s_clsid];
d_cls = &slabclass[slab_rebal.d_clsid];
- /* At this point the stolen slab is completely clear */
- s_cls->slab_list[s_cls->killing - 1] =
- s_cls->slab_list[s_cls->slabs - 1];
+ /* At this point the stolen slab is completely clear.
+ * We always kill the "first"/"oldest" slab page in the slab_list, so
+ * shuffle the page list backwards and decrement.
+ */
+ for (x = 0; x < s_cls->slabs; x++) {
+ s_cls->slab_list[x] = s_cls->slab_list[x+1];
+ }
s_cls->slabs--;
- s_cls->killing = 0;
memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max);