Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--   include/linux/slub_def.h   18
1 file changed, 9 insertions, 9 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 8a9c2876ca89..33c5c0e3bd8d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -158,11 +158,11 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
 
 void *fixup_red_left(struct kmem_cache *s, void *p);
 
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
 				void *x) {
-	void *object = x - (x - page_address(page)) % cache->size;
-	void *last_object = page_address(page) +
-		(page->objects - 1) * cache->size;
+	void *object = x - (x - slab_address(slab)) % cache->size;
+	void *last_object = slab_address(slab) +
+		(slab->objects - 1) * cache->size;
 	void *result = (unlikely(object > last_object)) ? last_object : object;
 
 	result = fixup_red_left(cache, result);
@@ -178,16 +178,16 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct page *page, void *obj)
+					const struct slab *slab, void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
-	return __obj_to_index(cache, page_address(page), obj);
+	return __obj_to_index(cache, slab_address(slab), obj);
 }
 
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
-					const struct page *page)
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				const struct slab *slab)
 {
-	return page->objects;
+	return slab->objects;
 }
 #endif /* _LINUX_SLUB_DEF_H */
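
For orientation, a minimal caller-side sketch of the converted helpers. This is not part of the patch: it assumes mm-internal code (for example somewhere in mm/slub.c) where struct slab, the helpers above, and the usual printk headers are already in scope, and the function name slab_debug_walk and its parameters are hypothetical.

/* Hypothetical sketch only -- callers now pass a struct slab instead of a struct page. */
static void slab_debug_walk(struct kmem_cache *cache, struct slab *slab, void *p)
{
	void *obj = nearest_obj(cache, slab, p);		/* was nearest_obj(cache, page, p) */
	unsigned int idx = obj_to_index(cache, slab, obj);	/* was obj_to_index(cache, page, obj) */
	int objects = objs_per_slab(cache, slab);		/* was objs_per_slab_page(cache, page) */

	pr_debug("object %p is index %u of %d objects in this slab\n",
		 obj, idx, objects);
}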