author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2015-02-05 15:22:48 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2015-02-05 15:22:48 -0500
commit		74a271f72a4b5dbc279b8d813d20bfdb9decdfe7 (patch)
tree		3920845688e50e6026f6f2b04be656491971b930 /mm
parent		e9c64350d30c0d4c6dc6361a897b632444dde95d (diff)
parent		7cdb8bda7211ef6c153cbeb0a9f003094a442df8 (diff)
Merge tag 'v3.12.37' into v3.12-rt

This is the 3.12.37 stable release

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	20
-rw-r--r--	mm/memory.c	4
-rw-r--r--	mm/mmap.c	7
-rw-r--r--	mm/vmscan.c	24
4 files changed, 29 insertions, 26 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index e94c70380deb..bd08e9bbf347 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -897,7 +897,7 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
@@ -916,7 +916,7 @@ EXPORT_SYMBOL(find_lock_entry);
* If there is a page cache page, it is returned with an increased refcount.
*/
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
@@ -953,13 +953,11 @@ no_page:
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
- cache_gfp_mask |= __GFP_WRITE;
- if (fgp_flags & FGP_NOFS) {
- cache_gfp_mask &= ~__GFP_FS;
- radix_gfp_mask &= ~__GFP_FS;
- }
+ gfp_mask |= __GFP_WRITE;
+ if (fgp_flags & FGP_NOFS)
+ gfp_mask &= ~__GFP_FS;
- page = __page_cache_alloc(cache_gfp_mask);
+ page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
@@ -970,7 +968,8 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
init_page_accessed(page);
- err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ err = add_to_page_cache_lru(page, mapping, offset,
+ gfp_mask & GFP_RECLAIM_MASK);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -2462,8 +2461,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
fgp_flags |= FGP_NOFS;
page = pagecache_get_page(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping),
- GFP_KERNEL);
+ mapping_gfp_mask(mapping));
if (page)
wait_for_stable_page(page);
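
The filemap.c hunks above collapse pagecache_get_page()'s two gfp arguments into one: the data page is allocated with gfp_mask directly, while the radix-tree insertion in add_to_page_cache_lru() filters the same mask through GFP_RECLAIM_MASK. A minimal caller sketch against the new signature (the helper name and flag combination are illustrative, not taken from this tree):

	/* Hypothetical helper: find-or-create a page cache page for writing.
	 * A caller needing NOFS behaviour would OR in FGP_NOFS, which now
	 * clears __GFP_FS once, in the single mask, covering both the data
	 * page and the radix-tree node allocations. */
	static struct page *get_write_page(struct address_space *mapping,
					   pgoff_t index)
	{
		int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_ACCESSED;

		return pagecache_get_page(mapping, index, fgp_flags,
					  mapping_gfp_mask(mapping));
	}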
diff --git a/mm/memory.c b/mm/memory.c
index e4aa190980a6..068dc063471c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3194,7 +3194,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
- expand_downwards(vma, address - PAGE_SIZE);
+ return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
@@ -3203,7 +3203,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
- expand_upwards(vma, address + PAGE_SIZE);
+ return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
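
The memory.c hunks fix a swallowed error code: expand_downwards() and expand_upwards() can fail (for example with -ENOMEM when acct_stack_growth() refuses the growth), but check_stack_guard_page() discarded their return value and reported success. A sketch of why propagation matters at the call site (shape paraphrased from the do_anonymous_page() fault path, which is not part of this diff):

	/* With the fix, a failed guard-page expansion reaches the caller
	 * and the fault is refused instead of proceeding as if the stack
	 * had actually grown. */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;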
diff --git a/mm/mmap.c b/mm/mmap.c
index 15e07d5a75cb..441602d7259a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2049,14 +2049,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start;
+ unsigned long new_start, actual_size;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ actual_size = size;
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+ actual_size -= PAGE_SIZE;
+ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
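
The mmap.c change keeps the guard page from counting against RLIMIT_STACK: for a stack vma (VM_GROWSUP or VM_GROWSDOWN), one page is subtracted before the rlimit comparison. A standalone arithmetic sketch of the old check versus the new one (page size and limit are illustrative values, not from this diff):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;
		unsigned long rlim_cur  = 8UL << 20;		/* 8 MiB RLIMIT_STACK */
		unsigned long size = (8UL << 20) + page_size;	/* stack incl. guard page */
		unsigned long actual_size = size - page_size;	/* guard page excluded */

		/* Old check counted the guard page and failed with -ENOMEM. */
		printf("old: %s\n", size > rlim_cur ? "-ENOMEM" : "ok");
		/* New check excludes it, so growth up to the limit succeeds. */
		printf("new: %s\n", actual_size > rlim_cur ? "-ENOMEM" : "ok");
		return 0;
	}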
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5461d02ea718..ee8363f73cab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2868,18 +2868,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, int classzone_idx)
return false;
/*
- * There is a potential race between when kswapd checks its watermarks
- * and a process gets throttled. There is also a potential race if
- * processes get throttled, kswapd wakes, a large process exits therby
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
- * so wake them now if necessary. If necessary, processes will wake
- * kswapd and get throttled again
+ * The throttled processes are normally woken up in balance_pgdat() as
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+ * race between when kswapd checks the watermarks and a process gets
+ * throttled. There is also a potential race if processes get
+ * throttled, kswapd wakes, a large process exits thereby balancing the
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
+ * the wake up checks. If kswapd is going to sleep, no process should
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+ * the wake up is premature, processes will wake kswapd and get
+ * throttled again. The difference from wake ups in balance_pgdat() is
+ * that here we are under prepare_to_wait().
*/
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
- wake_up(&pgdat->pfmemalloc_wait);
- return false;
- }
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
+ wake_up_all(&pgdat->pfmemalloc_wait);
return pgdat_balanced(pgdat, order, classzone_idx);
}
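
The vmscan.c change narrows a kswapd livelock window: rather than refusing to sleep whenever pfmemalloc_wait has waiters, prepare_kswapd_sleep() now wakes them all and still returns whether the node is balanced. The new comment's point about prepare_to_wait() is the ordering guarantee that makes this safe; a simplified sketch of the kswapd side (paraphrased from kswapd_try_to_sleep() in this file, details omitted):

	DEFINE_WAIT(wait);

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	/* prepare_kswapd_sleep() runs after prepare_to_wait(), so a wake_up()
	 * racing with its checks is not lost: kswapd is already on the
	 * waitqueue and schedule_timeout() simply returns early. */
	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx))
		remaining = schedule_timeout(HZ/10);
	finish_wait(&pgdat->kswapd_wait, &wait);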