author      Mike Rapoport <rppt@linux.ibm.com>        2019-02-05 13:05:19 +1100
committer   Stephen Rothwell <sfr@canb.auug.org.au>   2019-02-06 16:25:52 +1100
commit      f537eec35a9556abf80972ba553fec9c79c0e53b (patch)
tree        df8664b4fecd5db656829d414082b182e00bf8d8 /mm
parent      8fa5f0c78e1b1af91bb598d482722f9c27e1e1dd (diff)
download    linux-next-f537eec35a9556abf80972ba553fec9c79c0e53b.tar.gz
treewide: add checks for the return value of memblock_alloc*()
Add checks for the return value of the memblock_alloc*() functions and call
panic() in case of error. The panic message repeats the one used by the
panicking memblock allocators, with the parameters adjusted to include only
the relevant ones.

The replacement was mostly automated with semantic patches like the one
below, with manual massaging of the format strings:

@@
expression ptr, size, align;
@@
ptr = memblock_alloc(size, align);
+ if (!ptr)
+         panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+               __func__, size, align);

Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Guo Ren <ren_guo@c-sky.com>                [c-sky]
Acked-by: Paul Burton <paul.burton@mips.com>            [MIPS]
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>    [s390]
Reviewed-by: Juergen Gross <jgross@suse.com>            [Xen]
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>  [m68k]
Acked-by: Max Filippov <jcmvbkbc@gmail.com>             [xtensa]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
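To make the generated pattern concrete, the following is a minimal sketch of
the check the semantic patch produces, written in the style of the diff below.
The helper name example_table_alloc and the SMP_CACHE_BYTES alignment are
illustrative choices, not taken from the patch:

#include <linux/kernel.h>
#include <linux/memblock.h>

/* Hypothetical early-init helper; name and alignment are illustrative. */
static __init void *example_table_alloc(size_t size)
{
        void *table = memblock_alloc(size, SMP_CACHE_BYTES);

        /*
         * memblock_alloc() returns NULL on failure; panicking here makes
         * the message name the caller and the failed request, instead of
         * oopsing later on a NULL dereference.
         */
        if (!table)
                panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                      __func__, size, SMP_CACHE_BYTES);

        return table;
}

This mirrors early_alloc() in the kasan hunk below, where the panic message
additionally reports the node and the lower address bound of the allocation.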
Diffstat (limited to 'mm')
-rw-r--r--   mm/kasan/init.c   10
-rw-r--r--   mm/sparse.c       19
2 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 45a1b5e38e1e..af907d8da9f5 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -83,8 +83,14 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
 static __init void *early_alloc(size_t size, int node)
 {
-        return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-                                      MEMBLOCK_ALLOC_ACCESSIBLE, node);
+        void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+                                           MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+        if (!ptr)
+                panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+                      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+        return ptr;
 }
 
 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/sparse.c b/mm/sparse.c
index 7ea5dc6c6b19..ad942427622d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,11 +65,15 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
         unsigned long array_size = SECTIONS_PER_ROOT *
                                    sizeof(struct mem_section);
 
-        if (slab_is_available())
+        if (slab_is_available()) {
                 section = kzalloc_node(array_size, GFP_KERNEL, nid);
-        else
+        } else {
                 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
                                               nid);
+                if (!section)
+                        panic("%s: Failed to allocate %lu bytes nid=%d\n",
+                              __func__, array_size, nid);
+        }
 
         return section;
 }
@@ -218,6 +222,9 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
                 size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
                 align = 1 << (INTERNODE_CACHE_SHIFT);
                 mem_section = memblock_alloc(size, align);
+                if (!mem_section)
+                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+                              __func__, size, align);
         }
 #endif
@@ -411,6 +418,10 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
         map = memblock_alloc_try_nid(size,
                                      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+        if (!map)
+                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+                      __func__, size, PAGE_SIZE, nid, __pa(MAX_DMA_ADDRESS));
+
         return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -425,6 +436,10 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
         sparsemap_buf =
                 memblock_alloc_try_nid_raw(size, PAGE_SIZE,
                                            __pa(MAX_DMA_ADDRESS),
                                            MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+        if (!sparsemap_buf)
+                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+                      __func__, size, PAGE_SIZE, nid, __pa(MAX_DMA_ADDRESS));
+
         sparsemap_buf_end = sparsemap_buf + size;
 }