summaryrefslogtreecommitdiff
path: root/slabs.c
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2015-10-21 11:57:55 -0700
committerdormando <dormando@rydia.net>2015-11-18 23:14:36 -0800
commitec937e5ed1cdc3019f0ae5f2c362110ea54450d1 (patch)
tree1e98a1421ee57ac7460c6fc42293f5aa69d1fee0 /slabs.c
parentb1debc4c96134e9014a929c32d6990cb5d66a22c (diff)
downloadmemcached-ec937e5ed1cdc3019f0ae5f2c362110ea54450d1.tar.gz
fix over-inflation of total_malloced
mem_malloced was getting increased every time a page was assigned out of either malloc or the global page pool. This means total_malloced will inflate forever as pages are reused, and once limit_maxbytes is surpassed it will stop attempting to malloc more memory. The result is that we would stop malloc'ing new memory too early if page reclaim happens before the whole thing fills. The fix moves the accounting into memory_allocate() so the counter grows only when memory is actually malloc'ed. The test already caused this condition, so adding the extra checks was trivial.
Diffstat (limited to 'slabs.c')
-rw-r--r--slabs.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/slabs.c b/slabs.c
index 770480e..31e85f6 100644
--- a/slabs.c
+++ b/slabs.c
@@ -232,7 +232,6 @@ static int do_slabs_newslab(const unsigned int id) {
split_slab_page_into_freelist(ptr, id);
p->slab_list[p->slabs++] = ptr;
- mem_malloced += len;
MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);
return 1;
@@ -430,6 +429,7 @@ static void *memory_allocate(size_t size) {
mem_avail = 0;
}
}
+ mem_malloced += size;
return ret;
}