author		Hari Bathini <hbathini@linux.ibm.com>	2022-03-22 14:46:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-22 15:57:09 -0700
commit		27d121d0ec6d604d0147c5b579e4181b688a2d64 (patch)
tree		c0adf39071be6b925b33182582dd09be84a5cb94 /mm/cma.c
parent		9d84604b845c3888d1bede43d16ab3ebedb13e24 (diff)
mm/cma: provide option to opt out from exposing pages on activation failure
Patch series "powerpc/fadump: handle CMA activation failure appropriately", v3.

Commit 072355c1cf2d ("mm/cma: expose all pages to the buddy if activation of an area fails") started exposing all pages to the buddy allocator on CMA activation failure.  But there can be CMA users that want to handle the reserved memory differently on CMA activation failure.  Provide an option for such users to opt out from exposing the pages to the buddy.

Link: https://lkml.kernel.org/r/20220117075246.36072-1-hbathini@linux.ibm.com
Link: https://lkml.kernel.org/r/20220117075246.36072-2-hbathini@linux.ibm.com
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Cc: Sourabh Jain <sourabhjain@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
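For illustration, a CMA user that wants its memory kept reserved on activation failure calls the new helper right after declaring its area, from early (__init) code.  The sketch below is a hypothetical caller, not part of this patch: my_cma, my_cma_reserve() and the size parameters are made up, while cma_declare_contiguous() and cma_reserve_pages_on_error() are the actual interfaces involved:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct cma *my_cma;	/* hypothetical CMA user */

/* Runs from early arch setup, before cma_init_reserved_areas() activates the areas. */
void __init my_cma_reserve(void)
{
	/* Base/size/alignment values here are purely illustrative. */
	if (cma_declare_contiguous(0, SZ_256M, 0, 0, 0, false,
				   "my_cma", &my_cma))
		return;

	/*
	 * Opt out of the default error handling: if activation of this
	 * area fails later, keep the pages reserved instead of freeing
	 * them to the buddy allocator.
	 */
	cma_reserve_pages_on_error(my_cma);
}

Since reserve_pages_on_error defaults to false, existing CMA users keep the current behaviour of exposing the pages to the buddy unless they explicitly opt out.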
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 5a2cd5851658..eaa4b5c920a2 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -131,8 +131,10 @@ not_in_zone:
 	bitmap_free(cma->bitmap);
 out_error:
 	/* Expose all pages to the buddy, they are useless for CMA. */
-	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
-		free_reserved_page(pfn_to_page(pfn));
+	if (!cma->reserve_pages_on_error) {
+		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
+			free_reserved_page(pfn_to_page(pfn));
+	}
 	totalcma_pages -= cma->count;
 	cma->count = 0;
 	pr_err("CMA area %s could not be activated\n", cma->name);
@@ -150,6 +152,11 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
+void __init cma_reserve_pages_on_error(struct cma *cma)
+{
+	cma->reserve_pages_on_error = true;
+}
+
 /**
  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
  * @base: Base address of the reserved area