author    | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-29 17:29:11 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-29 17:29:11 -0700
commit    | 65090f30ab791810a3dc840317e57df05018559c (patch)
tree      | f417526656da37109777e89613e140ffc59228bc /mm/vmalloc.c
parent    | 349a2d52ffe59b7a0c5876fa7ee9f3eaf188b830 (diff)
parent    | 0ed950d1f28142ccd9a9453c60df87853530d778 (diff)
download  | linux-65090f30ab791810a3dc840317e57df05018559c.tar.gz
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
"191 patches.
Subsystems affected by this patch series: kthread, ia64, scripts,
ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
pagealloc, and memory-failure)"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
mm,hwpoison: make get_hwpoison_page() call get_any_page()
mm,hwpoison: send SIGBUS with error virtual address
mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
docs: remove description of DISCONTIGMEM
arch, mm: remove stale mentions of DISCONTIGMEM
mm: remove CONFIG_DISCONTIGMEM
m68k: remove support for DISCONTIGMEM
arc: remove support for DISCONTIGMEM
arc: update comment about HIGHMEM implementation
alpha: remove DISCONTIGMEM and NUMA
mm/page_alloc: move free_the_page
mm/page_alloc: fix counting of managed_pages
mm/page_alloc: improve memmap_pages dbg msg
mm: drop SECTION_SHIFT in code comments
mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
mm/page_alloc: scale the number of pages that are batch freed
...
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 121
1 file changed, 75 insertions, 46 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d0a7d89be091..b2ec7f751bd0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2567,6 +2567,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 
 			BUG_ON(!page);
 			__free_pages(page, page_order);
+			cond_resched();
 		}
 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
 
@@ -2758,6 +2759,54 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
 EXPORT_SYMBOL_GPL(vmap_pfn);
 #endif /* CONFIG_VMAP_PFN */
 
+static inline unsigned int
+vm_area_alloc_pages(gfp_t gfp, int nid,
+		unsigned int order, unsigned long nr_pages, struct page **pages)
+{
+	unsigned int nr_allocated = 0;
+
+	/*
+	 * For order-0 pages we make use of bulk allocator, if
+	 * the page array is partly or not at all populated due
+	 * to fails, fallback to a single page allocator that is
+	 * more permissive.
+	 */
+	if (!order)
+		nr_allocated = alloc_pages_bulk_array_node(
+			gfp, nid, nr_pages, pages);
+	else
+		/*
+		 * Compound pages required for remap_vmalloc_page if
+		 * high-order pages.
+		 */
+		gfp |= __GFP_COMP;
+
+	/* High-order pages or fallback path if "bulk" fails. */
+	while (nr_allocated < nr_pages) {
+		struct page *page;
+		int i;
+
+		page = alloc_pages_node(nid, gfp, order);
+		if (unlikely(!page))
+			break;
+
+		/*
+		 * Careful, we allocate and map page-order pages, but
+		 * tracking is done per PAGE_SIZE page so as to keep the
+		 * vm_struct APIs independent of the physical/mapped size.
+		 */
+		for (i = 0; i < (1U << order); i++)
+			pages[nr_allocated + i] = page + i;
+
+		if (gfpflags_allow_blocking(gfp))
+			cond_resched();
+
+		nr_allocated += 1U << order;
+	}
+
+	return nr_allocated;
+}
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, unsigned int page_shift,
 				 int node)
@@ -2768,8 +2817,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	unsigned long array_size;
 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
 	unsigned int page_order;
-	struct page **pages;
-	unsigned int i;
 
 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
 	gfp_mask |= __GFP_NOWARN;
@@ -2778,62 +2825,44 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
 					area->caller);
 	} else {
-		pages = kmalloc_node(array_size, nested_gfp, node);
+		area->pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 
-	if (!pages) {
-		free_vm_area(area);
+	if (!area->pages) {
 		warn_alloc(gfp_mask, NULL,
-			   "vmalloc size %lu allocation failure: "
-			   "page array size %lu allocation failed",
-			   nr_small_pages * PAGE_SIZE, array_size);
+			"vmalloc error: size %lu, failed to allocated page array size %lu",
+			nr_small_pages * PAGE_SIZE, array_size);
+		free_vm_area(area);
 		return NULL;
 	}
 
-	area->pages = pages;
-	area->nr_pages = nr_small_pages;
 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
-
 	page_order = vm_area_page_order(area);
 
-	/*
-	 * Careful, we allocate and map page_order pages, but tracking is done
-	 * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
-	 * the physical/mapped size.
-	 */
-	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
-		struct page *page;
-		int p;
-
-		/* Compound pages required for remap_vmalloc_page */
-		page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
-		if (unlikely(!page)) {
-			/* Successfully allocated i pages, free them in __vfree() */
-			area->nr_pages = i;
-			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
-			warn_alloc(gfp_mask, NULL,
-				   "vmalloc size %lu allocation failure: "
-				   "page order %u allocation failed",
-				   area->nr_pages * PAGE_SIZE, page_order);
-			goto fail;
-		}
+	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
+		page_order, nr_small_pages, area->pages);
 
-		for (p = 0; p < (1U << page_order); p++)
-			area->pages[i + p] = page + p;
+	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
-		if (gfpflags_allow_blocking(gfp_mask))
-			cond_resched();
+	/*
+	 * If not enough pages were obtained to accomplish an
+	 * allocation request, free them via __vfree() if any.
+	 */
+	if (area->nr_pages != nr_small_pages) {
+		warn_alloc(gfp_mask, NULL,
+			"vmalloc error: size %lu, page order %u, failed to allocate pages",
+			area->nr_pages * PAGE_SIZE, page_order);
+		goto fail;
 	}
 
-	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
-
-	if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) {
+	if (vmap_pages_range(addr, addr + size, prot, area->pages,
+			page_shift) < 0) {
 		warn_alloc(gfp_mask, NULL,
-			   "vmalloc size %lu allocation failure: "
-			   "failed to map pages",
-			   area->nr_pages * PAGE_SIZE);
+			"vmalloc error: size %lu, failed to map pages",
+			area->nr_pages * PAGE_SIZE);
 		goto fail;
 	}
 
@@ -2878,8 +2907,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 
 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
 		warn_alloc(gfp_mask, NULL,
-			   "vmalloc size %lu allocation failure: "
-			   "exceeds total pages", real_size);
+			"vmalloc error: size %lu, exceeds total pages",
+			real_size);
 		return NULL;
 	}
 
@@ -2910,8 +2939,8 @@ again:
 			  gfp_mask, caller);
 	if (!area) {
 		warn_alloc(gfp_mask, NULL,
-			   "vmalloc size %lu allocation failure: "
-			   "vm_struct allocation failed", real_size);
+			"vmalloc error: size %lu, vm_struct allocation failed",
+			real_size);
 		goto fail;
 	}
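
The new vm_area_alloc_pages() helper above follows a "bulk first, then permissive single-page fallback" pattern: the order-0 case tries the bulk allocator, and whatever it did not fill is topped up one allocation at a time, with the caller treating a partial result as failure. Below is a minimal userspace C sketch of that control flow, not kernel code; bulk_alloc(), single_alloc(), area_alloc() and SLOT_SIZE are hypothetical stand-ins for alloc_pages_bulk_array_node(), alloc_pages_node(), vm_area_alloc_pages() and PAGE_SIZE.

/*
 * Userspace sketch of the allocation pattern in the diff above.
 * The stand-in names are hypothetical and only mirror the shape of
 * the kernel code, not its API.
 */
#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE 4096

/* Stand-in bulk allocator: may legitimately fill only part of the array. */
static unsigned int bulk_alloc(void **slots, unsigned int want)
{
	unsigned int i;

	for (i = 0; i < want / 2; i++) {	/* pretend it gives up early */
		slots[i] = malloc(SLOT_SIZE);
		if (!slots[i])
			break;
	}
	return i;
}

/* Stand-in single-object allocator used as the more permissive fallback. */
static void *single_alloc(void)
{
	return malloc(SLOT_SIZE);
}

/*
 * Mirror of vm_area_alloc_pages(): keep whatever the bulk path produced,
 * then top the array up one object at a time, stopping at the first
 * failure and reporting how much was actually obtained.
 */
static unsigned int area_alloc(void **slots, unsigned int want)
{
	unsigned int got = bulk_alloc(slots, want);

	while (got < want) {
		void *p = single_alloc();

		if (!p)
			break;	/* caller decides what to do with a partial fill */
		slots[got++] = p;
	}
	return got;
}

int main(void)
{
	void *slots[8] = { NULL };
	unsigned int want = 8;
	unsigned int got = area_alloc(slots, want);
	unsigned int i;

	/* Like __vmalloc_area_node(): a partial fill is treated as failure. */
	printf("allocated %u of %u slots%s\n", got, want,
	       got == want ? "" : " (would take the fail path)");

	for (i = 0; i < got; i++)
		free(slots[i]);

	return 0;
}

In the actual patch the fallback loop additionally calls cond_resched() between allocations when the gfp mask allows blocking, and a partial count is recorded in area->nr_pages so the fail path can hand the already-allocated pages to __vfree(); the companion hunk in __vunmap() adds the matching cond_resched() on the free side.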