author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 11:40:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-27 11:40:49 -0700
commit     8291eaafed36f575f23951f3ce18407f480e9ecf
tree       279b61422ba2df7b8579af8ccc81331de80affa8  /mm/swapfile.c
parent     77fb622de1393b1d54f24f4f7ed98f84feeda502
parent     fa020a2b87d24016723fff4a4237deb612478a32
download   linux-next-8291eaafed36f575f23951f3ce18407f480e9ecf.tar.gz
Merge tag 'mm-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:

 - Two follow-on fixes for the post-5.19 series "Use pageblock_order
   for cma and alloc_contig_range alignment", from Zi Yan

 - A series of z3fold cleanups and fixes from Miaohe Lin

 - Some memcg selftests work from Michal Koutný <mkoutny@suse.com>

 - Some swap fixes and cleanups from Miaohe Lin

 - Several individual minor fixups

* tag 'mm-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (25 commits)
  mm/shmem.c: suppress shift warning
  mm: Kconfig: reorganize misplaced mm options
  mm: kasan: fix input of vmalloc_to_page()
  mm: fix is_pinnable_page against a cma page
  mm: filter out swapin error entry in shmem mapping
  mm/shmem: fix infinite loop when swap in shmem error at swapoff time
  mm/madvise: free hwpoison and swapin error entry in madvise_free_pte_range
  mm/swapfile: fix lost swap bits in unuse_pte()
  mm/swapfile: unuse_pte can map random data if swap read fails
  selftests: memcg: factor out common parts of memory.{low,min} tests
  selftests: memcg: remove protection from top level memcg
  selftests: memcg: adjust expected reclaim values of protected cgroups
  selftests: memcg: expect no low events in unprotected sibling
  selftests: memcg: fix compilation
  mm/z3fold: fix z3fold_page_migrate races with z3fold_map
  mm/z3fold: fix z3fold_reclaim_page races with z3fold_free
  mm/z3fold: always clear PAGE_CLAIMED under z3fold page lock
  mm/z3fold: put z3fold page back into unbuddied list when reclaim or migration fails
  revert "mm/z3fold.c: allow __GFP_HIGHMEM in z3fold_alloc"
  mm/z3fold: throw warning on failure of trylock_page in z3fold_alloc
  ...
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 94b4ff43ead0..a2e66d855b19 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1775,7 +1775,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	struct page *swapcache;
 	spinlock_t *ptl;
-	pte_t *pte;
+	pte_t *pte, new_pte;
 	int ret = 1;
 
 	swapcache = page;
@@ -1789,6 +1789,17 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
+	if (unlikely(!PageUptodate(page))) {
+		pte_t pteval;
+
+		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+		pteval = swp_entry_to_pte(make_swapin_error_entry(page));
+		set_pte_at(vma->vm_mm, addr, pte, pteval);
+		swap_free(entry);
+		ret = 0;
+		goto out;
+	}
+
 	/* See do_swap_page() */
 	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
 	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
@@ -1813,8 +1824,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		page_add_new_anon_rmap(page, vma, addr);
 		lru_cache_add_inactive_or_unevictable(page, vma);
 	}
-	set_pte_at(vma->vm_mm, addr, pte,
-		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
+	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
+	if (pte_swp_soft_dirty(*pte))
+		new_pte = pte_mksoft_dirty(new_pte);
+	if (pte_swp_uffd_wp(*pte))
+		new_pte = pte_mkuffd_wp(new_pte);
+	set_pte_at(vma->vm_mm, addr, pte, new_pte);
 	swap_free(entry);
 out:
 	pte_unmap_unlock(pte, ptl);
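
Two things happen in this patch. The first hunk handles a failed swap read: if the page never became uptodate, unuse_pte() installs a swapin error entry instead of mapping a page of stale data, so a later fault on that address fails visibly rather than silently exposing garbage ("unuse_pte can map random data if swap read fails"). The second hunk fixes the lost swap bits: building the new present pte with mk_pte() alone discards the soft-dirty and userfaultfd write-protect state carried in the swap pte, so both bits are now copied across explicitly. The sketch below models that second pattern in plain userspace C; fake_pte_t and the PTE_* flags are invented stand-ins for illustration, not kernel API.

/*
 * Minimal userspace model of the "lost swap bits in unuse_pte()" fix.
 * fake_pte_t and these flag bits are made up for illustration; real pte
 * encodings are architecture-specific kernel internals.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT	(1u << 0)
#define PTE_SOFT_DIRTY	(1u << 1)	/* stand-in for the swap pte's soft-dirty bit */
#define PTE_UFFD_WP	(1u << 2)	/* stand-in for the swap pte's uffd-wp bit */

typedef uint32_t fake_pte_t;

/* Pre-fix pattern: the new present pte is built from scratch, so any
 * auxiliary bits stored in the old swap pte are silently dropped. */
static fake_pte_t restore_pte_buggy(fake_pte_t swap_pte)
{
	(void)swap_pte;			/* old bits never consulted */
	return PTE_PRESENT;
}

/* Post-fix pattern, mirroring the last hunk above: start from the fresh
 * mapping, then carry each auxiliary bit over explicitly. */
static fake_pte_t restore_pte_fixed(fake_pte_t swap_pte)
{
	fake_pte_t new_pte = PTE_PRESENT;

	if (swap_pte & PTE_SOFT_DIRTY)
		new_pte |= PTE_SOFT_DIRTY;
	if (swap_pte & PTE_UFFD_WP)
		new_pte |= PTE_UFFD_WP;
	return new_pte;
}

int main(void)
{
	/* A swapped-out pte with both auxiliary bits set. */
	fake_pte_t swap_pte = PTE_SOFT_DIRTY | PTE_UFFD_WP;

	printf("buggy keeps soft-dirty: %s\n",
	       restore_pte_buggy(swap_pte) & PTE_SOFT_DIRTY ? "yes" : "no");
	printf("fixed keeps soft-dirty: %s\n",
	       restore_pte_fixed(swap_pte) & PTE_SOFT_DIRTY ? "yes" : "no");
	return 0;
}

Built with any C99 compiler, the buggy variant reports the soft-dirty bit as lost while the fixed variant keeps it, which is exactly the behavioral difference the hunk introduces for tools relying on soft-dirty tracking or userfaultfd write protection across swapoff.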