author    Oran Agra <oran@redislabs.com>  2020-02-18 16:19:52 +0200
committer Oran Agra <oran@redislabs.com>  2020-02-18 17:22:32 +0200
commit    485425cec76e2bfeb3e11503c4519f3d526bc09e (patch)
tree      4499969adfd2f4b1fc8b9235413dee8ec3dde858 /src/defrag.c
parent    df45fed050d6822dc86796b0089aaf1b56c7830e (diff)
Defrag big lists in portions to avoid latency and freeze
When active defrag kicks in and finds a big list, it will create a bookmark to a node so that it can resume iteration from that node later.

The quicklist manages that bookmark, and updates it in case that node is deleted.

This will increase memory usage, by 16 bytes, only on lists of over 1000 quicklist nodes (i.e. 1000 ziplists, not 1000 items; the threshold is active-defrag-max-scan-fields).

In a 32-bit build, this change reduces the maximum effective config of list-compress-depth and list-max-ziplist-size (from 32767 to 8191).
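Both figures quoted above (16 bytes per list, and the 8191 cap on 32-bit builds) fall out of how the companion quicklist patch lays out bookmark support. The sketch below reconstructs the relevant quicklist.h declarations; the exact field widths and struct shape are an assumption drawn from that companion change, not part of the diff shown on this page:

/* Sketch, reconstructing declarations from the companion quicklist patch.
 * Widths and layout here are an assumption, not quoted from this diff. */
#include <stdint.h>

typedef struct quicklistNode quicklistNode; /* defined in quicklist.h */

#if UINTPTR_MAX == 0xffffffff
/* 32-bit: fill and compress each give up 2 bits to make room for
 * bookmark_count; with 14-bit fields the effective config ceiling for
 * list-max-ziplist-size / list-compress-depth drops to 8191 (2^13-1)
 * from 32767 (2^15-1), as noted in the commit message. */
#   define QL_FILL_BITS 14
#   define QL_COMP_BITS 14
#   define QL_BM_BITS 4
#else
/* 64-bit: existing field widths are untouched. */
#   define QL_FILL_BITS 16
#   define QL_COMP_BITS 16
#   define QL_BM_BITS 4
#endif

/* One bookmark is two pointers: 16 bytes on a 64-bit build, which is the
 * per-list overhead quoted in the commit message. */
typedef struct quicklistBookmark {
    quicklistNode *node; /* node to resume from; the quicklist updates it
                          * if that node gets deleted */
    char *name;          /* "_AD" for the active-defrag scan */
} quicklistBookmark;

typedef struct quicklist {
    quicklistNode *head;
    quicklistNode *tail;
    unsigned long count;                  /* total entries across ziplists */
    unsigned long len;                    /* number of quicklistNodes */
    int fill : QL_FILL_BITS;              /* per-node fill factor */
    unsigned int compress : QL_COMP_BITS; /* uncompressed depth at ends */
    unsigned int bookmark_count : QL_BM_BITS;
    quicklistBookmark bookmarks[];        /* flexible array, realloc'd when
                                           * a bookmark is created */
} quicklist;

Because creating a bookmark may grow this header with realloc, the call site in the diff below passes the quicklist by reference and writes the possibly-moved pointer back (the ob->ptr = ql line in scanLaterList).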
Diffstat (limited to 'src/defrag.c')
 -rw-r--r--  src/defrag.c | 96 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 29 deletions(-)
diff --git a/src/defrag.c b/src/defrag.c
index 04e57955b..e729297a5 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -5,8 +5,8 @@
* We do that by scanning the keyspace and for each pointer we have, we can try to
* ask the allocator if moving it to a new address will help reduce fragmentation.
*
- * Copyright (c) 2017, Oran Agra
- * Copyright (c) 2017, Redis Labs, Inc
+ * Copyright (c) 2020, Oran Agra
+ * Copyright (c) 2020, Redis Labs, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -408,25 +408,32 @@ dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sd
return NULL;
}
-long activeDefragQuickListNodes(quicklist *ql) {
- quicklistNode *node = ql->head, *newnode;
+long activeDefragQuickListNode(quicklist *ql, quicklistNode **node_ref) {
+ quicklistNode *newnode, *node = *node_ref;
long defragged = 0;
unsigned char *newzl;
+ if ((newnode = activeDefragAlloc(node))) {
+ if (newnode->prev)
+ newnode->prev->next = newnode;
+ else
+ ql->head = newnode;
+ if (newnode->next)
+ newnode->next->prev = newnode;
+ else
+ ql->tail = newnode;
+ *node_ref = node = newnode;
+ defragged++;
+ }
+ if ((newzl = activeDefragAlloc(node->zl)))
+ defragged++, node->zl = newzl;
+ return defragged;
+}
+
+long activeDefragQuickListNodes(quicklist *ql) {
+ quicklistNode *node = ql->head;
+ long defragged = 0;
while (node) {
- if ((newnode = activeDefragAlloc(node))) {
- if (newnode->prev)
- newnode->prev->next = newnode;
- else
- ql->head = newnode;
- if (newnode->next)
- newnode->next->prev = newnode;
- else
- ql->tail = newnode;
- node = newnode;
- defragged++;
- }
- if ((newzl = activeDefragAlloc(node->zl)))
- defragged++, node->zl = newzl;
+ defragged += activeDefragQuickListNode(ql, &node);
node = node->next;
}
return defragged;
@@ -440,12 +447,48 @@ void defragLater(redisDb *db, dictEntry *kde) {
listAddNodeTail(db->defrag_later, key);
}
-long scanLaterList(robj *ob) {
+/* returns 0 if no more work needs to be done, and 1 if time is up and more work is needed. */
+long scanLaterList(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
quicklist *ql = ob->ptr;
+ quicklistNode *node;
+ long iterations = 0;
+ int bookmark_failed = 0;
if (ob->type != OBJ_LIST || ob->encoding != OBJ_ENCODING_QUICKLIST)
return 0;
- server.stat_active_defrag_scanned+=ql->len;
- return activeDefragQuickListNodes(ql);
+
+ if (*cursor == 0) {
+ /* if cursor is 0, we start new iteration */
+ node = ql->head;
+ } else {
+ node = quicklistBookmarkFind(ql, "_AD");
+ if (!node) {
+ /* if the bookmark was deleted, it means we reached the end. */
+ *cursor = 0;
+ return 0;
+ }
+ node = node->next;
+ }
+
+ (*cursor)++;
+ while (node) {
+ (*defragged) += activeDefragQuickListNode(ql, &node);
+ server.stat_active_defrag_scanned++;
+ if (++iterations > 128 && !bookmark_failed) {
+ if (ustime() > endtime) {
+ if (!quicklistBookmarkCreate(&ql, "_AD", node)) {
+ bookmark_failed = 1;
+ } else {
+ ob->ptr = ql; /* bookmark creation may have re-allocated the quicklist */
+ return 1;
+ }
+ }
+ iterations = 0;
+ }
+ node = node->next;
+ }
+ quicklistBookmarkDelete(ql, "_AD");
+ *cursor = 0;
+ return bookmark_failed? 1: 0;
}
typedef struct {
@@ -638,7 +681,8 @@ int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime,
void *newdata = activeDefragAlloc(ri.data);
if (newdata)
raxSetData(ri.node, ri.data=newdata), (*defragged)++;
- if (++iterations > 16) {
+ server.stat_active_defrag_scanned++;
+ if (++iterations > 128) {
if (ustime() > endtime) {
serverAssert(ri.key_len==sizeof(last));
memcpy(last,ri.key,ri.key_len);
@@ -900,8 +944,7 @@ int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) {
if (de) {
robj *ob = dictGetVal(de);
if (ob->type == OBJ_LIST) {
- server.stat_active_defrag_hits += scanLaterList(ob);
- *cursor = 0; /* list has no scan, we must finish it in one go */
+ return scanLaterList(ob, cursor, endtime, &server.stat_active_defrag_hits);
} else if (ob->type == OBJ_SET) {
server.stat_active_defrag_hits += scanLaterSet(ob, cursor);
} else if (ob->type == OBJ_ZSET) {
@@ -961,11 +1004,6 @@ int defragLaterStep(redisDb *db, long long endtime) {
if (defragLaterItem(de, &defrag_later_cursor, endtime))
quit = 1; /* time is up, we didn't finish all the work */
- /* Don't start a new BIG key in this loop, this is because the
- * next key can be a list, and scanLaterList must be done in one cycle */
- if (!defrag_later_cursor)
- quit = 1;
-
/* Once in 16 scan iterations, 512 pointer reallocations, or 64 fields
* (if we have a lot of pointers in one hash bucket, or rehashing),
* check if we reached the time limit. */
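
Taken together, the new scanLaterList boils down to one reusable shape: walk the nodes, check the clock only once per batch, and park a named bookmark when time runs out. Below is a condensed sketch of that pattern, assuming the bookmark API from the companion quicklist patch; visit_node() is a hypothetical stand-in for the defrag work, and the bookmark_failed fallback and defrag-hit accounting from the real patch are omitted for brevity:

#include "server.h"    /* ustime() */
#include "quicklist.h" /* quicklist and the quicklistBookmark* API */

/* Hypothetical per-node work; the real patch calls
 * activeDefragQuickListNode() here. */
static void visit_node(quicklistNode *node) { (void)node; }

/* Returns 0 when the scan completed, 1 when time ran out and a bookmark
 * was parked so the next call can resume from it. */
static int scanListInPortions(quicklist **ql_ref, unsigned long *cursor,
                              long long endtime) {
    quicklist *ql = *ql_ref;
    quicklistNode *node;
    long iterations = 0;

    if (*cursor == 0) {
        node = ql->head; /* cursor 0 means a fresh scan */
    } else {
        node = quicklistBookmarkFind(ql, "_AD");
        if (!node) {
            *cursor = 0; /* bookmark deleted: the list ended under us */
            return 0;
        }
        node = node->next; /* resume at the node after the saved one */
    }

    (*cursor)++;
    while (node) {
        visit_node(node);
        /* Only consult the clock every 128 nodes; ustime() is not free. */
        if (++iterations > 128) {
            if (ustime() > endtime &&
                quicklistBookmarkCreate(&ql, "_AD", node)) {
                *ql_ref = ql; /* creation may realloc the quicklist header */
                return 1;     /* time is up, more work remains */
            }
            iterations = 0;
        }
        node = node->next;
    }
    quicklistBookmarkDelete(ql, "_AD"); /* harmless if never created */
    *cursor = 0;
    return 0;
}

In the real patch that return value propagates up through defragLaterItem, so defragLaterStep no longer has to finish a whole list in one cycle; that is exactly the "Don't start a new BIG key" block removed in the last hunk above.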