From 90f2cbbc49a8fe5a49cea1d362d90e377b949d49 Mon Sep 17 00:00:00 2001
From: Ezequiel Garcia
Date: Sat, 8 Sep 2012 17:47:51 -0300
Subject: mm, slob: Use NUMA_NO_NODE instead of -1

Acked-by: David Rientjes
Signed-off-by: Ezequiel Garcia
Signed-off-by: Pekka Enberg
---
 mm/slob.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/slob.c')

diff --git a/mm/slob.c b/mm/slob.c
index 45d4ca79933a..191e1713a6d9 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	void *page;
 
 #ifdef CONFIG_NUMA
-	if (node != -1)
+	if (node != NUMA_NO_NODE)
 		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -514,7 +514,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
 
 	if (c) {
 		c->name = name;
--
cgit v1.2.1

From f3f741019595f1e73564d985f5fe8abcbb98c769 Mon Sep 17 00:00:00 2001
From: Ezequiel Garcia
Date: Sat, 8 Sep 2012 17:47:53 -0300
Subject: mm, slob: Add support for kmalloc_track_caller()

Currently slob falls back to regular kmalloc for this case.
With this patch kmalloc_track_caller() is correctly implemented,
thus tracing the specified caller.

This is important to trace accurately allocations performed by
krealloc, kstrdup, kmemdup, etc.

Signed-off-by: Ezequiel Garcia
Signed-off-by: Pekka Enberg
---
 mm/slob.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

(limited to 'mm/slob.c')

diff --git a/mm/slob.c b/mm/slob.c
index 191e1713a6d9..dd47d16d57b6 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -425,7 +425,8 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+				  int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfpflags, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
 	struct page *sp;
--
cgit v1.2.1
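
For context on the second patch: helpers such as kstrdup(), kmemdup() and krealloc() allocate through kmalloc_track_caller(), which (when caller tracking is enabled) expands to __kmalloc_track_caller(size, flags, _RET_IP_). The sketch below is illustrative only and is not part of the commits above; example_kstrdup() is a hypothetical helper modelled on the kernel's kstrdup(), shown to make clear why slob now passes the caller down to trace_kmalloc_node(): the allocation is attributed to whoever called the helper, not to a generic kmalloc inside it.

/*
 * Illustrative sketch, not part of the patches above.
 * example_kstrdup() is a hypothetical helper modelled on kstrdup().
 * kmalloc_track_caller() forwards _RET_IP_ (the return address of the
 * current function, i.e. the address of example_kstrdup()'s caller) to
 * __kmalloc_track_caller(), so with the second patch applied slob's
 * trace_kmalloc_node() event records that caller rather than the
 * helper itself.
 */
#include <linux/slab.h>
#include <linux/string.h>

static char *example_kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	/* _RET_IP_ here resolves to the caller of example_kstrdup() */
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}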