Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  102
1 file changed, 54 insertions(+), 48 deletions(-)
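
Most of the churn below is a mechanical conversion from open-coded rw_semaphore operations on mm->mmap_sem to the mmap locking API (mmap_write_lock(), mmap_read_unlock(), and friends). As a reading aid, here is a minimal sketch of how those wrappers reduce to the underlying rw_semaphore, assuming the semaphore field in struct mm_struct is named mmap_lock as the updated comments suggest; the canonical definitions live in include/linux/mmap_lock.h and may differ in detail:

/* Sketch only: each wrapper is a thin veneer over the rw_semaphore
 * embedded in struct mm_struct, mirroring the old->new mapping in the
 * hunks below (down_write_killable -> mmap_write_lock_killable, etc.). */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_lock);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_lock);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_lock);
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	/* keep the lock, but drop from exclusive to shared mode */
	downgrade_write(&mm->mmap_lock);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}

Beyond being shorter at the call sites, routing every acquisition through one API gives a single place to hang lock instrumentation later, which is the usual motivation for this kind of conversion.
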
diff --git a/mm/mmap.c b/mm/mmap.c
index 1811918706d8..3277002f89bf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -62,6 +62,14 @@
 #define arch_mmap_check(addr, len, flags)	(0)
 #endif
 
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
 const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
@@ -132,7 +140,7 @@ void vma_set_page_prot(struct vm_area_struct *vma)
 		vm_flags &= ~VM_SHARED;
 		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
 	}
-	/* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
+	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 }
 
@@ -198,7 +206,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	bool downgraded = false;
 	LIST_HEAD(uf);
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	origbrk = mm->brk;
@@ -238,14 +246,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 	/*
 	 * Always allow shrinking brk.
-	 * __do_munmap() may downgrade mmap_sem to read.
+	 * __do_munmap() may downgrade mmap_lock to read.
 	 */
 	if (brk <= mm->brk) {
 		int ret;
 
 		/*
-		 * mm->brk must to be protected by write mmap_sem so update it
-		 * before downgrading mmap_sem. When __do_munmap() fails,
+		 * mm->brk must to be protected by write mmap_lock so update it
+		 * before downgrading mmap_lock. When __do_munmap() fails,
 		 * mm->brk will be restored from origbrk.
 		 */
 		mm->brk = brk;
@@ -272,9 +280,9 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 success:
 	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
 	if (downgraded)
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 	else
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 	userfaultfd_unmap_complete(mm, &uf);
 	if (populate)
 		mm_populate(oldbrk, newbrk - oldbrk);
@@ -282,7 +290,7 @@ success:
 
 out:
 	retval = origbrk;
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return retval;
 }
 
@@ -505,7 +513,7 @@ static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
  * After the update, the vma will be reinserted using
  * anon_vma_interval_tree_post_update_vma().
  *
- * The entire update must be protected by exclusive mmap_sem and by
+ * The entire update must be protected by exclusive mmap_lock and by
  * the root anon_vma's mutex.
  */
 static inline void
@@ -1361,7 +1369,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 }
 
 /*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
@@ -1369,6 +1377,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long pgoff, unsigned long *populate,
 			struct list_head *uf)
 {
+	const unsigned long mmap_end = arch_get_mmap_end(addr);
 	struct mm_struct *mm = current->mm;
 	int pkey = 0;
 
@@ -1391,8 +1400,12 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	if (flags & MAP_FIXED_NOREPLACE)
 		flags |= MAP_FIXED;
 
-	if (!(flags & MAP_FIXED))
+	if (flags & MAP_FIXED) {
+		if ((addr < mmap_min_addr) || (addr > mmap_end))
+			return -ENOMEM;
+	} else {
 		addr = round_hint_to_min(addr);
+	}
 
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
@@ -2089,14 +2102,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
 	return addr;
 }
 
-#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr)	(TASK_SIZE)
-#endif
-
-#ifndef arch_get_mmap_base
-#define arch_get_mmap_base(addr, base) (base)
-#endif
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -2208,12 +2213,13 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
+	const unsigned long mmap_end = arch_get_mmap_end(addr);
 
 	unsigned long error = arch_mmap_check(addr, len, flags);
 	if (error)
 		return error;
 
 	/* Careful about overflows.. */
-	if (len > TASK_SIZE)
+	if (len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
 	get_area = current->mm->get_unmapped_area;
@@ -2234,7 +2240,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
-	if (addr > TASK_SIZE - len)
+	if ((addr < mmap_min_addr) || (addr > mmap_end - len))
 		return -ENOMEM;
 	if (offset_in_page(addr))
 		return -EINVAL;
@@ -2386,7 +2392,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
-	 * is required to hold the mmap_sem in read mode. We need the
+	 * is required to hold the mmap_lock in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
 	anon_vma_lock_write(vma->anon_vma);
@@ -2404,7 +2410,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (!error) {
 			/*
 			 * vma_gap_update() doesn't support concurrent
-			 * updates, but we only hold a shared mmap_sem
+			 * updates, but we only hold a shared mmap_lock
 			 * lock here, so we need to protect against
 			 * concurrent vma expansions.
 			 * anon_vma_lock_write() doesn't help here, as
@@ -2466,7 +2472,7 @@ int expand_downwards(struct vm_area_struct *vma,
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
-	 * is required to hold the mmap_sem in read mode. We need the
+	 * is required to hold the mmap_lock in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
 	anon_vma_lock_write(vma->anon_vma);
@@ -2484,7 +2490,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		if (!error) {
 			/*
 			 * vma_gap_update() doesn't support concurrent
-			 * updates, but we only hold a shared mmap_sem
+			 * updates, but we only hold a shared mmap_lock
 			 * lock here, so we need to protect against
 			 * concurrent vma expansions.
 			 * anon_vma_lock_write() doesn't help here, as
@@ -2843,7 +2849,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
 
 	if (downgrade)
-		downgrade_write(&mm->mmap_sem);
+		mmap_write_downgrade(mm);
 
 	unmap_region(mm, vma, prev, start, end);
 
@@ -2865,20 +2871,20 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 	struct mm_struct *mm = current->mm;
 	LIST_HEAD(uf);
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	ret = __do_munmap(mm, start, len, &uf, downgrade);
 	/*
-	 * Returning 1 indicates mmap_sem is downgraded.
+	 * Returning 1 indicates mmap_lock is downgraded.
 	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
 	 * it to 0 before return.
 	 */
 	if (ret == 1) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 		ret = 0;
 	} else
-		up_write(&mm->mmap_sem);
+		mmap_write_unlock(mm);
 
 	userfaultfd_unmap_complete(mm, &uf);
 	return ret;
@@ -2926,7 +2932,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
 		return ret;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	vma = find_vma(mm, start);
@@ -2989,7 +2995,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 			prot, flags, pgoff, &populate, NULL);
 	fput(file);
 out:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	if (populate)
 		mm_populate(ret, populate);
 	if (!IS_ERR_VALUE(ret))
@@ -3089,12 +3095,12 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 	if (!len)
 		return 0;
 
-	if (down_write_killable(&mm->mmap_sem))
+	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
 	ret = do_brk_flags(addr, len, flags, &uf);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	userfaultfd_unmap_complete(mm, &uf);
 	if (populate && !ret)
 		mm_populate(addr, len);
@@ -3122,12 +3128,12 @@ void exit_mmap(struct mm_struct *mm)
 		/*
 		 * Manually reap the mm to free as much memory as possible.
 		 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
-		 * this mm from further consideration. Taking mm->mmap_sem for
+		 * this mm from further consideration. Taking mm->mmap_lock for
 		 * write after setting MMF_OOM_SKIP will guarantee that the oom
-		 * reaper will not run on this mm again after mmap_sem is
+		 * reaper will not run on this mm again after mmap_lock is
 		 * dropped.
 		 *
-		 * Nothing can be holding mm->mmap_sem here and the above call
+		 * Nothing can be holding mm->mmap_lock here and the above call
 		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
 		 * __oom_reap_task_mm() will not block.
 		 *
@@ -3138,8 +3144,8 @@ void exit_mmap(struct mm_struct *mm)
 		(void)__oom_reap_task_mm(mm);
 
 		set_bit(MMF_OOM_SKIP, &mm->flags);
-		down_write(&mm->mmap_sem);
-		up_write(&mm->mmap_sem);
+		mmap_write_lock(mm);
+		mmap_write_unlock(mm);
 	}
 
 	if (mm->locked_vm) {
@@ -3452,7 +3458,7 @@ bool vma_is_special_mapping(const struct vm_area_struct *vma,
 }
 
 /*
- * Called with mm->mmap_sem held for writing.
+ * Called with mm->mmap_lock held for writing.
  * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
@@ -3489,7 +3495,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares
@@ -3519,7 +3525,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
 	}
 }
 
@@ -3528,11 +3534,11 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
- * The caller must take the mmap_sem in write mode before calling
+ * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
- * mmap_sem until mm_drop_all_locks() returns.
+ * mmap_lock until mm_drop_all_locks() returns.
 *
- * mmap_sem in write mode is required in order to block all operations
+ * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas to be associated with existing vmas.
@@ -3565,7 +3571,7 @@ int mm_take_all_locks(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	struct anon_vma_chain *avc;
 
-	BUG_ON(down_read_trylock(&mm->mmap_sem));
+	BUG_ON(mmap_read_trylock(mm));
 
 	mutex_lock(&mm_all_locks_mutex);
 
@@ -3637,7 +3643,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
 }
 
 /*
- * The mmap_sem cannot be released by the caller until
+ * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
 void mm_drop_all_locks(struct mm_struct *mm)
@@ -3645,7 +3651,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	struct anon_vma_chain *avc;
 
-	BUG_ON(down_read_trylock(&mm->mmap_sem));
+	BUG_ON(mmap_read_trylock(mm));
 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
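
The remaining changes replace hard-coded TASK_SIZE limits in do_mmap() and get_unmapped_area() with arch_get_mmap_end()/arch_get_mmap_base(), moving the generic fallback definitions to the top of the file so the new MAP_FIXED bounds check in do_mmap() can see them. The hooks let an architecture choose the effective address-space ceiling per request, based on the hint address. A sketch of an override, modeled loosely on arm64's 52-bit virtual-address support (the macro bodies there are version-dependent, so treat this as illustrative):

/* Hypothetical arch override: a hint above the default 48-bit window
 * opts this mapping in to the full 52-bit user address space. */
#define arch_get_mmap_end(addr) \
	((addr) > DEFAULT_MAP_WINDOW ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) \
	((addr) > DEFAULT_MAP_WINDOW ? \
		(base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))

With the generic fallbacks, mmap_end is simply TASK_SIZE, so the rewritten checks differ from the old ones only in also rejecting addresses below mmap_min_addr; only architectures that override the hooks see a different ceiling.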