From a285909f471d6703a04b2b3942c352e27131c92b Mon Sep 17 00:00:00 2001
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date: Wed, 6 Apr 2022 15:00:03 +0900
Subject: mm/slub, kunit: Make slub_kunit unaffected by user specified flags

slub_kunit does not expect other debugging flags to be set when running
tests. When the SLAB_RED_ZONE flag is set globally, the tests fail because
the flag affects the number of errors reported.

To make slub_kunit unaffected by user-specified debugging flags, introduce
SLAB_NO_USER_FLAGS to ignore them. With this flag, only the flags specified
in the code are used and others are ignored.

Suggested-by: Vlastimil Babka
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/Yk0sY9yoJhFEXWOg@hyeyoo
---
 mm/slub.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..4c78f5919356 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1584,6 +1584,9 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
         slab_flags_t block_flags;
         slab_flags_t slub_debug_local = slub_debug;
 
+        if (flags & SLAB_NO_USER_FLAGS)
+                return flags;
+
         /*
          * If the slab cache is for debugging (e.g. kmemleak) then
          * don't store user (stack trace) information by default,
--
cgit v1.2.1
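The precedence rule that SLAB_NO_USER_FLAGS introduces is easy to demonstrate
in isolation. The following is a minimal userspace sketch of the idea, with
made-up flag values and a simplified merge; the real flag is defined in
include/linux/slab.h and the real merging happens in kmem_cache_flags() above:

    #include <assert.h>

    typedef unsigned int slab_flags_t;

    #define SLAB_RED_ZONE      0x1u   /* values are illustrative only */
    #define SLAB_STORE_USER    0x2u
    #define SLAB_NO_USER_FLAGS 0x4u   /* "ignore slub_debug from the user" */

    /* Pretend the user booted with slub_debug=Z (red zoning for all caches). */
    static slab_flags_t slub_debug = SLAB_RED_ZONE;

    /* Simplified stand-in for kmem_cache_flags(): merge user flags unless opted out. */
    static slab_flags_t cache_flags(slab_flags_t flags)
    {
        if (flags & SLAB_NO_USER_FLAGS)
            return flags;              /* only the flags given in code survive */
        return flags | slub_debug;     /* otherwise user-requested debugging is added */
    }

    int main(void)
    {
        /* A cache like the slub_kunit test caches: no red zone sneaks in. */
        assert(cache_flags(SLAB_NO_USER_FLAGS) == SLAB_NO_USER_FLAGS);
        /* An ordinary cache still picks up the user-requested debugging. */
        assert(cache_flags(0) & SLAB_RED_ZONE);
        return 0;
    }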
From 0cd1a02901858049d14b5b9d4c5c680b012c8cc1 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Fri, 4 Feb 2022 17:44:40 +0100
Subject: mm/slub: move struct track init out of set_track()

set_track() either zeroes out the struct track or fills it, depending on
the addr parameter. This is unnecessary as there's only one place that
calls it for the initialization - init_tracking(). We can simply do the
zeroing there, with a single memset() that covers both TRACK_ALLOC and
TRACK_FREE as they are adjacent.

Signed-off-by: Vlastimil Babka
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes
---
 mm/slub.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..cd4fd0159911 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -729,34 +729,32 @@ static void set_track(struct kmem_cache *s, void *object,
 {
         struct track *p = get_track(s, object, alloc);
 
-        if (addr) {
 #ifdef CONFIG_STACKTRACE
-                unsigned int nr_entries;
+        unsigned int nr_entries;
 
-                metadata_access_enable();
-                nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
-                                              TRACK_ADDRS_COUNT, 3);
-                metadata_access_disable();
+        metadata_access_enable();
+        nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+                                      TRACK_ADDRS_COUNT, 3);
+        metadata_access_disable();
 
-                if (nr_entries < TRACK_ADDRS_COUNT)
-                        p->addrs[nr_entries] = 0;
+        if (nr_entries < TRACK_ADDRS_COUNT)
+                p->addrs[nr_entries] = 0;
 #endif
-                p->addr = addr;
-                p->cpu = smp_processor_id();
-                p->pid = current->pid;
-                p->when = jiffies;
-        } else {
-                memset(p, 0, sizeof(struct track));
-        }
+        p->addr = addr;
+        p->cpu = smp_processor_id();
+        p->pid = current->pid;
+        p->when = jiffies;
 }
 
 static void init_tracking(struct kmem_cache *s, void *object)
 {
+        struct track *p;
+
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
-        set_track(s, object, TRACK_FREE, 0UL);
-        set_track(s, object, TRACK_ALLOC, 0UL);
+        p = get_track(s, object, TRACK_ALLOC);
+        memset(p, 0, 2*sizeof(struct track));
 }
 
 static void print_track(const char *s, struct track *t, unsigned long pr_time)
--
cgit v1.2.1
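The "single memset() covers both slots" trick relies only on TRACK_ALLOC and
TRACK_FREE being consecutive array elements of the object's metadata. A small
standalone sketch of the idea (the struct here is a trimmed-down stand-in, not
the kernel definition):

    #include <assert.h>
    #include <string.h>

    enum track_item { TRACK_ALLOC, TRACK_FREE };

    struct track {
        unsigned long addr;
        int cpu, pid;
        unsigned long when;
    };

    int main(void)
    {
        struct track tracks[2] = {
            [TRACK_ALLOC] = { .addr = 0x1234, .cpu = 1, .pid = 42, .when = 99 },
            [TRACK_FREE]  = { .addr = 0x5678, .cpu = 2, .pid = 43, .when = 100 },
        };

        /* One memset starting at the TRACK_ALLOC slot wipes both adjacent slots. */
        memset(&tracks[TRACK_ALLOC], 0, 2 * sizeof(struct track));

        assert(tracks[TRACK_ALLOC].addr == 0 && tracks[TRACK_FREE].addr == 0);
        return 0;
    }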
From 5cf909c553e9efed573811de4b3f5172898d5515 Mon Sep 17 00:00:00 2001
From: Oliver Glitta
Date: Wed, 7 Jul 2021 18:07:47 -0700
Subject: mm/slub: use stackdepot to save stack trace in objects

Many stack traces are similar, so there are many similar arrays.
Stackdepot saves each unique stack only once.

Replace the addrs field in struct track with a depot_stack_handle_t
handle and use stackdepot to save the stack trace.

The benefits are a smaller memory overhead and the possibility to
aggregate per-cache statistics in the following patch using the
stackdepot handle instead of matching stacks manually.

[ vbabka@suse.cz: rebase to 5.17-rc1 and adjust accordingly ]

This was initially merged as commit 788691464c29 and reverted by commit
ae14c63a9f20 due to several issues that should now be fixed. The problem
of unconditional memory overhead by stackdepot has been addressed by
commit 2dba5eb1c73b ("lib/stackdepot: allow optional init and
stack_table allocation by kvmalloc()"), so the dependency on stackdepot
results in extra memory usage only when slab cache tracking is actually
enabled, and not for all CONFIG_SLUB_DEBUG builds. The build failures on
some architectures were also addressed, and the reported issue with the
xfs/433 test did not reproduce on 5.17-rc1 with this patch.

Signed-off-by: Oliver Glitta
Signed-off-by: Vlastimil Babka
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes
Cc: David Rientjes
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: Joonsoo Kim
---
 mm/slub.c | 71 +++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 40 insertions(+), 31 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index cd4fd0159911..98c1450c23f0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -26,6 +26,7 @@
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
+#include <linux/stackdepot.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/kfence.h>
@@ -264,8 +265,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define TRACK_ADDRS_COUNT 16
 struct track {
         unsigned long addr;     /* Called from address */
-#ifdef CONFIG_STACKTRACE
-        unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
+#ifdef CONFIG_STACKDEPOT
+        depot_stack_handle_t handle;
 #endif
         int cpu;                /* Was running on cpu */
         int pid;                /* Pid context */
@@ -724,22 +725,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
         return kasan_reset_tag(p + alloc);
 }
 
-static void set_track(struct kmem_cache *s, void *object,
+static void noinline set_track(struct kmem_cache *s, void *object,
                         enum track_item alloc, unsigned long addr)
 {
         struct track *p = get_track(s, object, alloc);
 
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACKDEPOT
+        unsigned long entries[TRACK_ADDRS_COUNT];
         unsigned int nr_entries;
 
-        metadata_access_enable();
-        nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
-                                      TRACK_ADDRS_COUNT, 3);
-        metadata_access_disable();
-
-        if (nr_entries < TRACK_ADDRS_COUNT)
-                p->addrs[nr_entries] = 0;
+        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+        p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 #endif
+
         p->addr = addr;
         p->cpu = smp_processor_id();
         p->pid = current->pid;
@@ -759,20 +757,19 @@ static void init_tracking(struct kmem_cache *s, void *object)
 
 static void print_track(const char *s, struct track *t, unsigned long pr_time)
 {
+        depot_stack_handle_t handle __maybe_unused;
+
         if (!t->addr)
                 return;
 
         pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
                s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKTRACE
-        {
-                int i;
-                for (i = 0; i < TRACK_ADDRS_COUNT; i++)
-                        if (t->addrs[i])
-                                pr_err("\t%pS\n", (void *)t->addrs[i]);
-                        else
-                                break;
-        }
+#ifdef CONFIG_STACKDEPOT
+        handle = READ_ONCE(t->handle);
+        if (handle)
+                stack_depot_print(handle);
+        else
+                pr_err("object allocation/free stack trace missing\n");
 #endif
 }
 
@@ -1532,6 +1529,8 @@ static int __init setup_slub_debug(char *str)
                         global_slub_debug_changed = true;
                 } else {
                         slab_list_specified = true;
+                        if (flags & SLAB_STORE_USER)
+                                stack_depot_want_early_init();
                 }
         }
 
@@ -1549,6 +1548,8 @@
         }
 out:
         slub_debug = global_flags;
+        if (slub_debug & SLAB_STORE_USER)
+                stack_depot_want_early_init();
         if (slub_debug != 0 || slub_debug_string)
                 static_branch_enable(&slub_debug_enabled);
         else
@@ -4342,18 +4343,26 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
         objp = fixup_red_left(s, objp);
         trackp = get_track(s, objp, TRACK_ALLOC);
         kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKTRACE
-        for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
-                kpp->kp_stack[i] = (void *)trackp->addrs[i];
-                if (!kpp->kp_stack[i])
-                        break;
-        }
+#ifdef CONFIG_STACKDEPOT
+        {
+                depot_stack_handle_t handle;
+                unsigned long *entries;
+                unsigned int nr_entries;
+
+                handle = READ_ONCE(trackp->handle);
+                if (handle) {
+                        nr_entries = stack_depot_fetch(handle, &entries);
+                        for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+                                kpp->kp_stack[i] = (void *)entries[i];
+                }
 
-        trackp = get_track(s, objp, TRACK_FREE);
-        for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
-                kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
-                if (!kpp->kp_free_stack[i])
-                        break;
+                trackp = get_track(s, objp, TRACK_FREE);
+                handle = READ_ONCE(trackp->handle);
+                if (handle) {
+                        nr_entries = stack_depot_fetch(handle, &entries);
+                        for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+                                kpp->kp_free_stack[i] = (void *)entries[i];
+                }
         }
 #endif
 #endif
--
cgit v1.2.1
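Condensed, the new tracking flow is: capture the trace into a small temporary
buffer, hand it to stackdepot, and keep only the returned handle; reporting
later fetches the deduplicated trace back through that handle. A rough,
illustrative sketch of the round trip (not the literal slub.c code; the
metadata_access_*() guards and all error handling are omitted):

    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    #define NR_ENTRIES 16

    /* Save side, roughly what set_track() now does: only the handle is stored. */
    static depot_stack_handle_t save_current_stack(void)
    {
            unsigned long entries[NR_ENTRIES];
            unsigned int nr;

            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 3 /* skip callers */);
            return stack_depot_save(entries, nr, GFP_NOWAIT);
    }

    /* Report side, roughly what print_track() and kmem_obj_info() now do. */
    static void print_saved_stack(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr, i;

            if (!handle) {
                    pr_err("stack trace missing\n");
                    return;
            }
            nr = stack_depot_fetch(handle, &entries);
            for (i = 0; i < nr; i++)
                    pr_err("  %pS\n", (void *)entries[i]);
    }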
From 8ea9fb921bc68376081a9db410cf1d6afa0c6fb9 Mon Sep 17 00:00:00 2001
From: Oliver Glitta
Date: Fri, 21 May 2021 14:11:25 +0200
Subject: mm/slub: distinguish and print stack traces in debugfs files

Aggregate objects in a slub cache by their unique stack trace, in addition
to the caller address, when producing the contents of the alloc_traces and
free_traces debugfs files. Also add the stack traces to the debugfs output.
This makes it much more useful for e.g. debugging memory leaks.

Signed-off-by: Oliver Glitta
Signed-off-by: Vlastimil Babka
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 98c1450c23f0..f2e550e1adf0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5064,6 +5064,7 @@ EXPORT_SYMBOL(validate_slab_cache);
  */
 
 struct location {
+        depot_stack_handle_t handle;
         unsigned long count;
         unsigned long addr;
         long long sum_time;
@@ -5116,9 +5117,13 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
         long start, end, pos;
         struct location *l;
-        unsigned long caddr;
+        unsigned long caddr, chandle;
         unsigned long age = jiffies - track->when;
+        depot_stack_handle_t handle = 0;
+#ifdef CONFIG_STACKDEPOT
+        handle = READ_ONCE(track->handle);
+#endif
 
         start = -1;
         end = t->count;
@@ -5133,7 +5138,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
                         break;
 
                 caddr = t->loc[pos].addr;
-                if (track->addr == caddr) {
+                chandle = t->loc[pos].handle;
+                if ((track->addr == caddr) && (handle == chandle)) {
 
                         l = &t->loc[pos];
                         l->count++;
@@ -5158,6 +5164,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 
                 if (track->addr < caddr)
                         end = pos;
+                else if (track->addr == caddr && handle < chandle)
+                        end = pos;
                 else
                         start = pos;
         }
@@ -5180,6 +5188,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
         l->max_time = age;
         l->min_pid = track->pid;
         l->max_pid = track->pid;
+        l->handle = handle;
         cpumask_clear(to_cpumask(l->cpus));
         cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
         nodes_clear(l->nodes);
@@ -6089,6 +6098,21 @@ static int slab_debugfs_show(struct seq_file *seq, void *v)
                         seq_printf(seq, " nodes=%*pbl",
                                  nodemask_pr_args(&l->nodes));
 
+#ifdef CONFIG_STACKDEPOT
+                {
+                        depot_stack_handle_t handle;
+                        unsigned long *entries;
+                        unsigned int nr_entries, j;
+
+                        handle = READ_ONCE(l->handle);
+                        if (handle) {
+                                nr_entries = stack_depot_fetch(handle, &entries);
+                                seq_puts(seq, "\n");
+                                for (j = 0; j < nr_entries; j++)
+                                        seq_printf(seq, " %pS\n", (void *)entries[j]);
+                        }
+                }
+#endif
                 seq_puts(seq, "\n");
         }
--
cgit v1.2.1
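The key change in add_location() is that a slot in the sorted location array is
now identified by the pair (caller address, depot handle) rather than by the
address alone, so two records that share a caller but were reached through
different stacks stay separate. A standalone sketch of that two-key comparison
(types and field names are hypothetical; the kernel open-codes the comparisons
inside its binary search rather than using a comparator function):

    #include <stdio.h>

    struct loc_key {
        unsigned long addr;    /* caller address */
        unsigned int  handle;  /* stand-in for depot_stack_handle_t */
    };

    /* Order by addr first, then by handle, mirroring the insertion logic above. */
    static int cmp_loc_key(const struct loc_key *a, const struct loc_key *b)
    {
        if (a->addr != b->addr)
            return a->addr < b->addr ? -1 : 1;
        if (a->handle != b->handle)
            return a->handle < b->handle ? -1 : 1;
        return 0;              /* same caller and same stack: merge into one entry */
    }

    int main(void)
    {
        struct loc_key x = { .addr = 0xffff0000, .handle = 7 };
        struct loc_key y = { .addr = 0xffff0000, .handle = 9 };

        /* Same caller, different stacks: kept as two distinct entries. */
        printf("cmp = %d\n", cmp_loc_key(&x, &y));  /* prints cmp = -1 */
        return 0;
    }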
From 553c0369b3e132b4c1081700029f6d9e1a7332cf Mon Sep 17 00:00:00 2001
From: Oliver Glitta
Date: Fri, 21 May 2021 14:11:26 +0200
Subject: mm/slub: sort debugfs output by frequency of stack traces

Sort the output of debugfs alloc_traces and free_traces by the frequency
of allocation/freeing stack traces. Most frequently used stack traces
will be printed first, e.g. for easier memory leak debugging.

Signed-off-by: Oliver Glitta
Signed-off-by: Vlastimil Babka
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes
---
 mm/slub.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index f2e550e1adf0..2963dc123336 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -38,6 +38,7 @@
 #include <linux/memcontrol.h>
 #include <linux/random.h>
 #include <kunit/test.h>
+#include <linux/sort.h>
 
 #include <linux/debugfs.h>
 #include <trace/events/kmem.h>
@@ -6137,6 +6138,17 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
         return NULL;
 }
 
+static int cmp_loc_by_count(const void *a, const void *b, const void *data)
+{
+        struct location *loc1 = (struct location *)a;
+        struct location *loc2 = (struct location *)b;
+
+        if (loc1->count > loc2->count)
+                return -1;
+        else
+                return 1;
+}
+
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
         struct loc_track *t = seq->private;
@@ -6198,6 +6210,10 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
                 spin_unlock_irqrestore(&n->list_lock, flags);
         }
 
+        /* Sort locations by count */
+        sort_r(t->loc, t->count, sizeof(struct location),
+                cmp_loc_by_count, NULL, NULL);
+
         bitmap_free(obj_map);
         return 0;
 }
--
cgit v1.2.1
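The same "most frequent first" ordering can be reproduced in a userspace sketch
with plain qsort(). Note that the in-kernel cmp_loc_by_count() above never
returns 0, so the relative order of entries with equal counts is unspecified;
the sketch below returns 0 for ties, which qsort() treats the same way:

    #include <stdio.h>
    #include <stdlib.h>

    struct location {
        unsigned long count;
        const char *name;
    };

    /* Descending by count: the hottest allocation sites come out first. */
    static int cmp_by_count_desc(const void *a, const void *b)
    {
        const struct location *l1 = a, *l2 = b;

        if (l1->count > l2->count)
            return -1;
        if (l1->count < l2->count)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct location loc[] = {
            { 3, "alloc_site_a" }, { 42, "alloc_site_b" }, { 7, "alloc_site_c" },
        };
        int i;

        qsort(loc, 3, sizeof(loc[0]), cmp_by_count_desc);
        for (i = 0; i < 3; i++)
            printf("%5lu %s\n", loc[i].count, loc[i].name);
        return 0;
    }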
From c0f81a94d405621bb492becf905f6d8267a14f9a Mon Sep 17 00:00:00 2001
From: JaeSang Yoo
Date: Mon, 11 Apr 2022 16:25:34 +0900
Subject: mm/slub: remove unused parameter in setup_object*()

setup_object_debug() and setup_object() have an unused parameter,
"struct slab *slab". Remove it.

setup_object_debug() was introduced by commit 3ec0974210fe ("SLUB:
Simplify debug code") to refactor the previous code blocks in
setup_object(). The previous code used SlabDebug() to decide whether to
call init_object() and init_tracking(). While SlabDebug() took
"struct page *page" as an argument, setup_object_debug() only checks the
flags of "struct kmem_cache *s" and therefore does not need
"struct page *page". The struct page was later changed into struct slab
by commit bb192ed9aa719 ("mm/slub: Convert most struct page to struct
slab by spatch"), but the parameter is still unused.

Suggested-by: Ohhoon Kwon
Signed-off-by: JaeSang Yoo
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220411072534.3372768-1-jsyoo5b@gmail.com
---
 mm/slub.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 4c78f5919356..a34e40edc980 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1268,8 +1268,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 }
 
 /* Object debug checks for alloc/free paths */
-static void setup_object_debug(struct kmem_cache *s, struct slab *slab,
-                               void *object)
+static void setup_object_debug(struct kmem_cache *s, void *object)
 {
         if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
                 return;
@@ -1631,8 +1630,7 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
         return flags | slub_debug_local;
 }
 #else /* !CONFIG_SLUB_DEBUG */
-static inline void setup_object_debug(struct kmem_cache *s,
-                        struct slab *slab, void *object) {}
+static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
 static inline void
 setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
 
@@ -1775,10 +1773,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
         return *head != NULL;
 }
 
-static void *setup_object(struct kmem_cache *s, struct slab *slab,
-                          void *object)
+static void *setup_object(struct kmem_cache *s, void *object)
 {
-        setup_object_debug(s, slab, object);
+        setup_object_debug(s, object);
         object = kasan_init_slab_obj(s, object);
         if (unlikely(s->ctor)) {
                 kasan_unpoison_object_data(s, object);
@@ -1897,13 +1894,13 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
         /* First entry is used as the base of the freelist */
         cur = next_freelist_entry(s, slab, &pos, start, page_limit,
                                 freelist_count);
-        cur = setup_object(s, slab, cur);
+        cur = setup_object(s, cur);
         slab->freelist = cur;
         for (idx = 1; idx < slab->objects; idx++) {
                 next = next_freelist_entry(s, slab, &pos, start, page_limit,
                                         freelist_count);
-                next = setup_object(s, slab, next);
+                next = setup_object(s, next);
                 set_freepointer(s, cur, next);
                 cur = next;
         }
@@ -1974,11 +1971,11 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
         if (!shuffle) {
                 start = fixup_red_left(s, start);
-                start = setup_object(s, slab, start);
+                start = setup_object(s, start);
                 slab->freelist = start;
                 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
                         next = p + s->size;
-                        next = setup_object(s, slab, next);
+                        next = setup_object(s, next);
                         set_freepointer(s, p, next);
                         p = next;
                 }
--
cgit v1.2.1
From 27c08f751cb1fc874562e9b18d70ea2af33ca889 Mon Sep 17 00:00:00 2001
From: Jiyoup Kim
Date: Sun, 10 Apr 2022 00:05:37 +0900
Subject: mm/slub: remove duplicate flag in allocate_slab()

In allocate_slab(), the __GFP_NOFAIL flag is removed twice when trying a
higher-order allocation. Remove the duplicate.

Signed-off-by: Jiyoup Kim
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Muchun Song
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220409150538.1264-1-lakroforce@gmail.com
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index a34e40edc980..3f775e77aae7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1939,7 +1939,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
          */
         alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
         if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
-                alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
+                alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
 
         slab = alloc_slab_page(alloc_gfp, node, oo);
         if (unlikely(!slab)) {
--
cgit v1.2.1

From 6b6efe23942536f3a2d2ae25c92d34d885f020c8 Mon Sep 17 00:00:00 2001
From: JaeSang Yoo
Date: Sat, 9 Apr 2022 23:42:39 +0900
Subject: mm/slub: remove meaningless node check in ___slab_alloc()

node_match() with node=NUMA_NO_NODE always returns 1, so re-running the
check via the goto statement is meaningless. Remove it.

Signed-off-by: JaeSang Yoo
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Muchun Song
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220409144239.2649257-1-jsyoo5b@gmail.com
---
 mm/slub.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 3f775e77aae7..bf63739f9649 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2910,7 +2910,6 @@ redo:
                  */
                 if (!node_isset(node, slab_nodes)) {
                         node = NUMA_NO_NODE;
-                        goto redo;
                 } else {
                         stat(s, ALLOC_NODE_MISMATCH);
                         goto deactivate_slab;
--
cgit v1.2.1
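Why the removed goto was redundant is easier to see next to the check it jumped
back to. node_match() is essentially the following (paraphrased from mm/slub.c
of that era, shown only for context): once node has been forced to NUMA_NO_NODE,
retrying the check can never take the mismatch branch again, so simply falling
through is equivalent.

    /* Paraphrased helper, for context only. */
    static inline int node_match(struct slab *slab, int node)
    {
    #ifdef CONFIG_NUMA
            if (node != NUMA_NO_NODE && slab_nid(slab) != node)
                    return 0;
    #endif
            return 1;
    }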
From a204e6d626126d33f34f43374d5abd00141f3e65 Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Tue, 19 Apr 2022 20:03:52 +0800
Subject: mm/slub: remove unneeded return value of slab_pad_check

The return value of slab_pad_check() is never used, so we can make it
return void now.

Signed-off-by: Miaohe Lin
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220419120352.37825-1-linmiaohe@huawei.com
---
 mm/slub.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index bf63739f9649..bfed9cfba36a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1021,7 +1021,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 }
 
 /* Check the pad bytes at the end of a slab page */
-static int slab_pad_check(struct kmem_cache *s, struct slab *slab)
+static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
 {
         u8 *start;
         u8 *fault;
@@ -1031,21 +1031,21 @@ static int slab_pad_check(struct kmem_cache *s, struct slab *slab)
         int remainder;
 
         if (!(s->flags & SLAB_POISON))
-                return 1;
+                return;
 
         start = slab_address(slab);
         length = slab_size(slab);
         end = start + length;
         remainder = length % s->size;
         if (!remainder)
-                return 1;
+                return;
 
         pad = end - remainder;
         metadata_access_enable();
         fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
         metadata_access_disable();
         if (!fault)
-                return 1;
+                return;
 
         while (end > fault && end[-1] == POISON_INUSE)
                 end--;
@@ -1054,7 +1054,6 @@ static int slab_pad_check(struct kmem_cache *s, struct slab *slab)
         print_section(KERN_ERR, "Padding ", pad, remainder);
 
         restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
-        return 0;
 }
 
 static int check_object(struct kmem_cache *s, struct slab *slab,
@@ -1642,8 +1641,7 @@ static inline int free_debug_processing(
         void *head, void *tail, int bulk_cnt,
         unsigned long addr) { return 0; }
 
-static inline int slab_pad_check(struct kmem_cache *s, struct slab *slab)
-                        { return 1; }
+static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
 static inline int check_object(struct kmem_cache *s, struct slab *slab,
                         void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
--
cgit v1.2.1

From 23587f7c5daa936524ede26201253a089666b5d9 Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Fri, 29 Apr 2022 17:05:45 +0800
Subject: mm/slub: remove unused kmem_cache_order_objects max

The max field holds the largest slab order that was ever used for a slab
cache. But it's unused now, so remove it.

Signed-off-by: Miaohe Lin
Reviewed-by: Muchun Song
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220429090545.33413-1-linmiaohe@huawei.com
---
 mm/slub.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'mm/slub.c')

diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..547b5168c938 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4165,8 +4165,6 @@ static int calculate_sizes(struct kmem_cache *s)
          */
         s->oo = oo_make(order, size);
         s->min = oo_make(get_order(size), size);
-        if (oo_objects(s->oo) > oo_objects(s->max))
-                s->max = s->oo;
 
         return !!oo_objects(s->oo);
 }
--
cgit v1.2.1