summaryrefslogtreecommitdiff
path: root/src/defrag.c
diff options
context:
space:
mode:
authorOran Agra <oran@redislabs.com>2020-05-20 14:08:40 +0300
committerOran Agra <oran@redislabs.com>2020-05-20 16:04:42 +0300
commit88d71f479338c1a70fac15ea37f87315f9401f99 (patch)
treedbd563607e11c608248c581ec2fa37735ddc1faf /src/defrag.c
parent5d83e9e1de94fb0250073565c051e71b7648d42d (diff)
downloadredis-88d71f479338c1a70fac15ea37f87315f9401f99.tar.gz
fix a rare active defrag edge case bug leading to stagnation
There's a rare case which leads to stagnation in the defragger, causing it to keep scanning the keyspace and do nothing (not moving any allocation). This happens when all the allocator slabs of a certain bin have the same % utilization, but the slab from which new allocations are made has a lower utilization. This commit fixes it by removing the current slab from the overall average utilization of the bin; it also eliminates any precision loss in the utilization calculation, moves the defrag decision to reside inside jemalloc, and adds a test that consistently reproduces this issue.
Diffstat (limited to 'src/defrag.c')
-rw-r--r--src/defrag.c13
1 file changed, 3 insertions, 10 deletions
diff --git a/src/defrag.c b/src/defrag.c
index e729297a5..6e5296632 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -43,7 +43,7 @@
/* this method was added to jemalloc in order to help us understand which
* pointers are worthwhile moving and which aren't */
-int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util);
+int je_get_defrag_hint(void* ptr);
/* forward declarations*/
void defragDictBucketCallback(void *privdata, dictEntry **bucketref);
@@ -55,18 +55,11 @@ dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sd
* when it returns a non-null value, the old pointer was already released
* and should NOT be accessed. */
void* activeDefragAlloc(void *ptr) {
- int bin_util, run_util;
size_t size;
void *newptr;
- if(!je_get_defrag_hint(ptr, &bin_util, &run_util)) {
- server.stat_active_defrag_misses++;
- return NULL;
- }
- /* if this run is more utilized than the average utilization in this bin
- * (or it is full), skip it. This will eventually move all the allocations
- * from relatively empty runs into relatively full runs. */
- if (run_util > bin_util || run_util == 1<<16) {
+ if(!je_get_defrag_hint(ptr)) {
server.stat_active_defrag_misses++;
+ size = zmalloc_size(ptr);
return NULL;
}
/* move this allocation to a new allocation.