diff options
author | dormando <dormando@rydia.net> | 2018-02-12 14:13:53 -0800 |
---|---|---|
committer | dormando <dormando@rydia.net> | 2018-02-12 14:27:34 -0800 |
commit | 37bfa412de11c4a69e7fca0a88fd2759958647b2 (patch) | |
tree | 5c8c14565009c56c46d83239e65e5764a44fa611 /crawler.c | |
parent | abfa8cf888db4ad9edc6b41b72f8d024d53290ee (diff) | |
download | memcached-37bfa412de11c4a69e7fca0a88fd2759958647b2.tar.gz |
limit crawls for metadumper
The LRU crawler metadumper is used for getting snapshot-like looks at the LRUs.
Since there's no default limit, it'll also pick up any new items added or bumped after
the roll started.
With this change, it limits the number of items dumped to the number that
existed in that LRU when the roll was kicked off. You still end up with an
approximation, but not a terrible one:
- items bumped after the crawler passes them likely won't be revisited
- items bumped before the crawler passes them will likely be visited toward
the end, or mixed with new items.
- deletes are somewhere in the middle.
Diffstat (limited to 'crawler.c')
-rw-r--r-- | crawler.c | 8 |
1 files changed, 5 insertions, 3 deletions
@@ -530,6 +530,9 @@ static int do_lru_crawler_start(uint32_t id, uint32_t remaining) { crawlers[sid].next = 0; crawlers[sid].prev = 0; crawlers[sid].time = 0; + if (remaining == LRU_CRAWLER_CAP_REMAINING) { + remaining = do_get_lru_size(sid); + } crawlers[sid].remaining = remaining; crawlers[sid].slabs_clsid = sid; crawlers[sid].reclaimed = 0; @@ -622,7 +625,7 @@ int lru_crawler_start(uint8_t *ids, uint32_t remaining, * Also only clear the crawlerstats once per sid. */ enum crawler_result_type lru_crawler_crawl(char *slabs, const enum crawler_run_type type, - void *c, const int sfd) { + void *c, const int sfd, unsigned int remaining) { char *b = NULL; uint32_t sid = 0; int starts = 0; @@ -651,8 +654,7 @@ enum crawler_result_type lru_crawler_crawl(char *slabs, const enum crawler_run_t } } - starts = lru_crawler_start(tocrawl, settings.lru_crawler_tocrawl, - type, NULL, c, sfd); + starts = lru_crawler_start(tocrawl, remaining, type, NULL, c, sfd); if (starts == -1) { return CRAWLER_RUNNING; } else if (starts == -2) { |