author		Liam Howlett <liam.howlett@oracle.com>		2021-06-28 19:38:41 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-29 10:53:51 -0700
commit		96d990239e31d9623fdb96e829237b997c9d3d63
tree		eb30138f5bda6cc31994bd5c1523610fd1271070 /mm/mmap.c
parent		78d9cf6041b968cc881fd22e25e2662d1cec4dba
mm/mmap: introduce unlock_range() for code cleanup
Both __do_munmap() and exit_mmap() unlock a range of VMAs using almost
identical code blocks. Replace both blocks with a static inline function.
[akpm@linux-foundation.org: tweak code layout]
Link: https://lkml.kernel.org/r/20210510211021.2797427-1-Liam.Howlett@Oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index bb128a42557e..d72716f7a0b4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2802,6 +2802,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2885,17 +2901,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3180,14 +3187,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);
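To see the factored-out walk in isolation, here is a minimal userspace C sketch of the pattern the commit consolidates: one helper that walks a sorted, singly linked list of ranges from a starting entry up to a limit address, unlocking any locked entries it passes. The fake_vma/fake_mm types and the fake_munlock()/fake_vma_pages() helpers are simplified stand-ins invented for illustration, not kernel APIs, and unlike the real unlock_range(), which takes its mm_struct from start->vm_mm, the sketch passes the mm explicitly.

/*
 * Userspace sketch of unlock_range() -- NOT kernel code.
 * fake_vma, fake_mm, fake_munlock() and fake_vma_pages() are
 * simplified stand-ins for vm_area_struct, mm_struct,
 * munlock_vma_pages_all() and vma_pages().
 */
#include <stdio.h>
#include <limits.h>

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
	int locked;               /* stand-in for vm_flags & VM_LOCKED */
	struct fake_vma *vm_next; /* VMAs sit on a sorted singly linked list */
};

struct fake_mm {
	struct fake_vma *mmap;    /* head of the VMA list */
	unsigned long locked_vm;  /* number of mlock()ed pages */
};

static unsigned long fake_vma_pages(struct fake_vma *vma)
{
	return (vma->vm_end - vma->vm_start) / 4096;
}

static void fake_munlock(struct fake_vma *vma)
{
	vma->locked = 0;
	printf("unlocked [%lx, %lx)\n", vma->vm_start, vma->vm_end);
}

/* The factored-out helper: walk from 'start' until 'limit' is reached. */
static void unlock_range(struct fake_mm *mm, struct fake_vma *start,
			 unsigned long limit)
{
	struct fake_vma *tmp = start;

	while (tmp && tmp->vm_start < limit) {
		if (tmp->locked) {
			mm->locked_vm -= fake_vma_pages(tmp);
			fake_munlock(tmp);
		}
		tmp = tmp->vm_next;
	}
}

int main(void)
{
	struct fake_vma b = { 0x2000, 0x3000, 1, NULL };
	struct fake_vma a = { 0x1000, 0x2000, 1, &b };
	struct fake_mm mm = { &a, 2 };

	/* __do_munmap()-style call: only unlock VMAs starting below 'end'. */
	unlock_range(&mm, mm.mmap, 0x2000);
	/* exit_mmap()-style call: ULONG_MAX means "the whole list". */
	unlock_range(&mm, mm.mmap, ULONG_MAX);
	printf("locked_vm now %lu\n", mm.locked_vm);
	return 0;
}

Passing ULONG_MAX as the limit, as exit_mmap() does, makes the bound check vacuous for any valid vm_start, so the one helper covers both the bounded __do_munmap() case and the whole-address-space teardown case without a second loop.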