| author | Ingo Molnar <mingo@kernel.org> | 2020-10-09 08:55:17 +0200 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2020-10-09 08:55:17 +0200 |
| commit | e705d397965811ac528d7213b42d74ffe43caf38 (patch) | |
| tree | 8a5bbe85cc42e64992b97859976e307027f83e33 /mm/swap.c | |
| parent | d89d5f855f84ccf3f7e648813b4bb95c780bd7cd (diff) | |
| parent | baffd723e44dc3d7f84f0b8f1fe1ece00ddd2710 (diff) | |
| download | linux-next-e705d397965811ac528d7213b42d74ffe43caf38.tar.gz | |
Merge branch 'locking/urgent' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/swap.c')
| -rw-r--r-- | mm/swap.c | 6 |

1 file changed, 3 insertions(+), 3 deletions(-)
```diff
diff --git a/mm/swap.c b/mm/swap.c
index a1ec807e325d..65ef7e3525bf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 
 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+		int nr_pages = thp_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }
```
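The hunk is the whole of this merge as far as mm/swap.c is concerned: before the change, mlocking a THP bumped NR_MLOCK by thp_nr_pages(page) base pages while count_vm_event() bumped UNEVICTABLE_PGMLOCKED by only one, so the two /proc/vmstat figures drifted apart for huge pages; afterwards both move by the same nr_pages. Below is a minimal userspace sketch of that skew, not kernel code: struct vmstat_sim and the simulate_mlock_*() helpers are invented names for this illustration, and THP_ORDER of 9 assumes x86-64's 2 MiB huge pages.

```c
/*
 * Userspace sketch of the accounting skew fixed above -- NOT kernel
 * code.  struct vmstat_sim and simulate_mlock_*() are invented names;
 * only NR_MLOCK and UNEVICTABLE_PGMLOCKED mirror real vmstat counters.
 */
#include <stdio.h>

#define THP_ORDER 9	/* assumed 2 MiB THP = 512 base pages (x86-64) */

struct vmstat_sim {
	long nr_mlock;		/* NR_MLOCK, counted in base pages */
	long pgmlocked;		/* UNEVICTABLE_PGMLOCKED event count */
};

/* Old behaviour: the zone stat moves by nr_pages, the event by one. */
static void simulate_mlock_old(struct vmstat_sim *vs, int nr_pages)
{
	vs->nr_mlock += nr_pages;
	vs->pgmlocked += 1;		/* count_vm_event(): one per call */
}

/* Patched behaviour: both counters move by the same nr_pages. */
static void simulate_mlock_new(struct vmstat_sim *vs, int nr_pages)
{
	vs->nr_mlock += nr_pages;
	vs->pgmlocked += nr_pages;	/* count_vm_events(..., nr_pages) */
}

int main(void)
{
	struct vmstat_sim old = { 0 }, new = { 0 };
	int sizes[] = { 1, 1 << THP_ORDER };	/* one 4 KiB page, one THP */

	for (int i = 0; i < 2; i++) {
		simulate_mlock_old(&old, sizes[i]);
		simulate_mlock_new(&new, sizes[i]);
	}

	printf("old: NR_MLOCK=%ld unevictable_pgs_mlocked=%ld (skewed)\n",
	       old.nr_mlock, old.pgmlocked);
	printf("new: NR_MLOCK=%ld unevictable_pgs_mlocked=%ld (consistent)\n",
	       new.nr_mlock, new.pgmlocked);
	return 0;
}
```

Compiled and run, the sketch reports NR_MLOCK=513 under both variants, but unevictable_pgs_mlocked=2 with the old accounting versus 513 with the patched one, which is exactly the inconsistency the count_vm_events() call removes.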