author | Tobin C. Harding <tobin@kernel.org> | 2019-05-13 17:16:12 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-14 09:47:44 -0700
commit | 916ac0527837aa0be46d82804f93dd46f03aaedc (patch)
tree | a97bc3be16e23e178d06f7e181c3f288d51ae6cc /mm/slub.c
parent | 6dfd1b653c49df2dad1dcfe063a196e940e02dbd (diff)
download | linux-next-916ac0527837aa0be46d82804f93dd46f03aaedc.tar.gz
slub: use slab_list instead of lru
Currently we use the page->lru list for maintaining lists of slabs. The
page structure already provides a dedicated list head (slab_list) for this
purpose. Using it makes the code cleaner, since we no longer overload the
lru list with slab bookkeeping.

Use slab_list instead of the lru list for maintaining lists of slabs.
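For context, the reason this is a pure rename at the memory level is that
slab_list and lru share storage inside the large union in struct page. The
following is an abridged sketch of that union (trimmed from
include/linux/mm_types.h of roughly this vintage; unrelated members and the
other union arms are elided), not the full definition:

/*
 * Abridged sketch of struct page, for illustration only; unrelated
 * fields and the remaining union arms are elided.
 */
struct page {
	unsigned long flags;
	union {
		struct {	/* Page cache and anonymous pages */
			struct list_head lru;
			/* ... */
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;	/* uses lru */
				struct {	/* Partial pages */
					struct page *next;
					/* ... */
				};
			};
			struct kmem_cache *slab_cache;
			/* ... */
		};
		/* ... */
	};
	/* ... */
};

Because the two list heads share the same storage, a loop such as
list_for_each_entry(page, &n->partial, slab_list) walks exactly the same
bytes as the old lru-based code; the change is about naming and
readability, not layout.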
Link: http://lkml.kernel.org/r/20190402230545.2929-6-tobin@kernel.org
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 43935b4f2b9e..ce6917b7451d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1014,7 +1014,7 @@ static void add_full(struct kmem_cache *s,
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_add(&page->lru, &n->full);
+	list_add(&page->slab_list, &n->full);
 }
 
 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1023,7 +1023,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1764,9 +1764,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
-		list_add_tail(&page->lru, &n->partial);
+		list_add_tail(&page->slab_list, &n->partial);
 	else
-		list_add(&page->lru, &n->partial);
+		list_add(&page->slab_list, &n->partial);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -1780,7 +1780,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	n->nr_partial--;
 }
 
@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
 		if (!pfmemalloc_match(page, flags))
@@ -2398,7 +2398,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	struct page *page;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
+	list_for_each_entry(page, &n->partial, slab_list)
 		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
@@ -3696,10 +3696,10 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
-	list_for_each_entry_safe(page, h, &n->partial, lru) {
+	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-			list_add(&page->lru, &discard);
+			list_add(&page->slab_list, &discard);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
@@ -3707,7 +3707,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	}
 	spin_unlock_irq(&n->list_lock);
 
-	list_for_each_entry_safe(page, h, &discard, lru)
+	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
 }
 
@@ -3987,7 +3987,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		 * Note that concurrent frees may occur while we hold the
 		 * list_lock. page->inuse here is the upper limit.
 		 */
-		list_for_each_entry_safe(page, t, &n->partial, lru) {
+		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
 			int free = page->objects - page->inuse;
 
 			/* Do not reread page->inuse */
@@ -3997,10 +3997,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 			BUG_ON(free <= 0);
 
 			if (free == page->objects) {
-				list_move(&page->lru, &discard);
+				list_move(&page->slab_list, &discard);
 				n->nr_partial--;
 			} else if (free <= SHRINK_PROMOTE_MAX)
-				list_move(&page->lru, promote + free - 1);
+				list_move(&page->slab_list, promote + free - 1);
 		}
 
 		/*
@@ -4013,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
-		list_for_each_entry_safe(page, t, &discard, lru)
+		list_for_each_entry_safe(page, t, &discard, slab_list)
 			discard_slab(s, page);
 
 		if (slabs_node(s, node))
@@ -4205,11 +4205,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 	for_each_kmem_cache_node(s, node, n) {
 		struct page *p;
 
-		list_for_each_entry(p, &n->partial, lru)
+		list_for_each_entry(p, &n->partial, slab_list)
 			p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-		list_for_each_entry(p, &n->full, lru)
+		list_for_each_entry(p, &n->full, slab_list)
 			p->slab_cache = s;
 #endif
 	}
@@ -4426,7 +4426,7 @@ static int validate_slab_node(struct kmem_cache *s,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 
-	list_for_each_entry(page, &n->partial, lru) {
+	list_for_each_entry(page, &n->partial, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4437,7 +4437,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
 
-	list_for_each_entry(page, &n->full, lru) {
+	list_for_each_entry(page, &n->full, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4633,9 +4633,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
+		list_for_each_entry(page, &n->partial, slab_list)
 			process_slab(&t, s, page, alloc, map);
-		list_for_each_entry(page, &n->full, lru)
+		list_for_each_entry(page, &n->full, slab_list)
 			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}