Diffstat (limited to 'mm')
-rw-r--r--  mm/kmemleak.c        | 28 +++++++++++++++++++++-------
-rw-r--r--  mm/memory_hotplug.c  |  4 +++-
-rw-r--r--  mm/percpu.c          |  2 +-
3 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3cda50c1e394..1f14ef68a5c8 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -193,6 +193,8 @@ static struct kmem_cache *scan_area_cache;
/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
+/* same as above but only for the kmemleak_free() callback */
+static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
@@ -905,12 +907,13 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
* kmemleak_alloc_percpu - register a newly allocated __percpu object
* @ptr: __percpu pointer to beginning of the object
* @size: size of the object
+ * @gfp: flags used for kmemleak internal memory allocations
*
* This function is called from the kernel percpu allocator when a new object
- * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
- * allocation.
+ * (memory block) is allocated (alloc_percpu).
*/
-void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp)
{
unsigned int cpu;
@@ -923,7 +926,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
create_object((unsigned long)per_cpu_ptr(ptr, cpu),
- size, 0, GFP_KERNEL);
+ size, 0, gfp);
else if (kmemleak_early_log)
log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
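(Not visible here because the diffstat is limited to 'mm': the declaration and the !CONFIG_DEBUG_KMEMLEAK stub of kmemleak_alloc_percpu() in include/linux/kmemleak.h need the same extra parameter, otherwise callers built without the leak detector stop compiling. A rough sketch of what the header side would look like, based on the usual kmemleak.h layout and not part of this diff:)

#ifdef CONFIG_DEBUG_KMEMLEAK
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                  gfp_t gfp) __ref;
#else
/* no-op stub when the leak detector is compiled out */
static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                         gfp_t gfp)
{
}
#endif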
@@ -940,7 +943,7 @@ void __ref kmemleak_free(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE, ptr, 0, 0);
@@ -980,7 +983,7 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
delete_object_full((unsigned long)per_cpu_ptr(ptr,
cpu));
@@ -1743,6 +1746,13 @@ static void kmemleak_do_cleanup(struct work_struct *work)
mutex_lock(&scan_mutex);
stop_scan_thread();
+ /*
+ * Once the scan thread has stopped, it is safe to no longer track
+ * object freeing. Ordering of the scan thread stopping and the memory
+ * accesses below is guaranteed by the kthread_stop() function.
+ */
+ kmemleak_free_enabled = 0;
+
if (!kmemleak_found_leaks)
__kmemleak_do_cleanup();
else
@@ -1769,6 +1779,8 @@ static void kmemleak_disable(void)
/* check whether it is too early for a kernel thread */
if (kmemleak_initialized)
schedule_work(&cleanup_work);
+ else
+ kmemleak_free_enabled = 0;
pr_info("Kernel memory leak detector disabled\n");
}
@@ -1833,8 +1845,10 @@ void __init kmemleak_init(void)
if (kmemleak_error) {
local_irq_restore(flags);
return;
- } else
+ } else {
kmemleak_enabled = 1;
+ kmemleak_free_enabled = 1;
+ }
local_irq_restore(flags);
/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8c71654e261f..05014e89efae 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1978,8 +1978,10 @@ void try_offline_node(int nid)
* wait_table may be allocated from boot memory,
* here only free if it's allocated by vmalloc.
*/
- if (is_vmalloc_addr(zone->wait_table))
+ if (is_vmalloc_addr(zone->wait_table)) {
vfree(zone->wait_table);
+ zone->wait_table = NULL;
+ }
}
}
EXPORT_SYMBOL(try_offline_node);
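(The memory_hotplug.c hunk above is the familiar free-and-clear pattern: leaving zone->wait_table pointing at freed memory would, presumably, hand a stale pointer to vfree() or to later re-initialization if the node is offlined again. A minimal illustration of the pattern; my_table and teardown_my_table() are made-up names for the example, not part of this patch:)

#include <linux/mm.h>           /* is_vmalloc_addr() */
#include <linux/vmalloc.h>      /* vfree() */

static void *my_table;

static void teardown_my_table(void)
{
        /* only free if it came from vmalloc, not from boot memory */
        if (is_vmalloc_addr(my_table)) {
                vfree(my_table);
                my_table = NULL;        /* a second call now skips vfree() */
        }
}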
diff --git a/mm/percpu.c b/mm/percpu.c
index 014bab65e0ff..88bb6c92d83a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1030,7 +1030,7 @@ area_found:
memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
- kmemleak_alloc_percpu(ptr, size);
+ kmemleak_alloc_percpu(ptr, size, gfp);
return ptr;
fail_unlock:
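(For context on why the gfp has to be forwarded at all: pcpu_alloc() can be reached with something stricter than GFP_KERNEL via alloc_percpu_gfp(), and kmemleak allocating its own tracking objects with GFP_KERNEL on that path could sleep where sleeping is not allowed. A minimal sketch of such a caller; struct my_stat, my_stats and my_stats_lock are made-up names, only alloc_percpu_gfp()/free_percpu() are existing kernel API:)

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_stat {
        u64 events;
};

static struct my_stat __percpu *my_stats;
static DEFINE_SPINLOCK(my_stats_lock);

/* called with a spinlock held: must not sleep, hence GFP_ATOMIC */
static int my_stats_init_atomic(void)
{
        struct my_stat __percpu *p;

        p = alloc_percpu_gfp(struct my_stat, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;

        spin_lock(&my_stats_lock);
        my_stats = p;
        spin_unlock(&my_stats_lock);
        return 0;
}

static void my_stats_exit(void)
{
        free_percpu(my_stats);
        my_stats = NULL;
}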