Diffstat (limited to 'mm/damon/paddr.c')
-rw-r--r-- | mm/damon/paddr.c | 163
1 file changed, 101 insertions, 62 deletions
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index e1a4315c4be6..607bb69e526c 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 
 static void damon_pa_mkold(unsigned long paddr)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		folio_set_idle(folio);
@@ -81,69 +79,57 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
-struct damon_pa_access_chk_result {
-	unsigned long page_sz;
-	bool accessed;
-};
-
 static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
-	struct damon_pa_access_chk_result *result = arg;
+	bool *accessed = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
-	result->accessed = false;
-	result->page_sz = PAGE_SIZE;
+	*accessed = false;
 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
 		if (pvmw.pte) {
-			result->accessed = pte_young(*pvmw.pte) ||
+			*accessed = pte_young(*pvmw.pte) ||
 				!folio_test_idle(folio) ||
 				mmu_notifier_test_young(vma->vm_mm, addr);
 		} else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			result->accessed = pmd_young(*pvmw.pmd) ||
+			*accessed = pmd_young(*pvmw.pmd) ||
 				!folio_test_idle(folio) ||
 				mmu_notifier_test_young(vma->vm_mm, addr);
-			result->page_sz = HPAGE_PMD_SIZE;
 #else
 			WARN_ON_ONCE(1);
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 		}
-		if (result->accessed) {
+		if (*accessed) {
 			page_vma_mapped_walk_done(&pvmw);
 			break;
 		}
 	}
 
 	/* If accessed, stop walking */
-	return !result->accessed;
+	return *accessed == false;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
-	struct damon_pa_access_chk_result result = {
-		.page_sz = PAGE_SIZE,
-		.accessed = false,
-	};
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+	bool accessed = false;
 	struct rmap_walk_control rwc = {
-		.arg = &result,
+		.arg = &accessed,
 		.rmap_one = __damon_pa_young,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return false;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		if (folio_test_idle(folio))
-			result.accessed = false;
+			accessed = false;
 		else
-			result.accessed = true;
+			accessed = true;
 		folio_put(folio);
 		goto out;
 	}
@@ -161,25 +147,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	folio_put(folio);
 
 out:
-	*page_sz = result.page_sz;
-	return result.accessed;
+	*folio_sz = folio_size(folio);
+	return accessed;
 }
 
 static void __damon_pa_check_access(struct damon_region *r)
 {
 	static unsigned long last_addr;
-	static unsigned long last_page_sz = PAGE_SIZE;
+	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;
 
 	/* If the region is in the last checked page, reuse the result */
-	if (ALIGN_DOWN(last_addr, last_page_sz) ==
-				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
 		if (last_accessed)
 			r->nr_accesses++;
 		return;
 	}
 
-	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
 	if (last_accessed)
 		r->nr_accesses++;
 
@@ -202,63 +188,116 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 	return max_nr_accesses;
 }
 
-static unsigned long damon_pa_pageout(struct damon_region *r)
+static bool __damos_pa_filter_out(struct damos_filter *filter,
+		struct folio *folio)
+{
+	bool matched = false;
+	struct mem_cgroup *memcg;
+
+	switch (filter->type) {
+	case DAMOS_FILTER_TYPE_ANON:
+		matched = folio_test_anon(folio);
+		break;
+	case DAMOS_FILTER_TYPE_MEMCG:
+		rcu_read_lock();
+		memcg = folio_memcg_check(folio);
+		if (!memcg)
+			matched = false;
+		else
+			matched = filter->memcg_id == mem_cgroup_id(memcg);
+		rcu_read_unlock();
+		break;
+	default:
+		break;
+	}
+
+	return matched == filter->matching;
+}
+
+/*
+ * damos_pa_filter_out - Return true if the page should be filtered out.
+ */
+static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
+{
+	struct damos_filter *filter;
+
+	damos_for_each_filter(filter, scheme) {
+		if (__damos_pa_filter_out(filter, folio))
+			return true;
+	}
+	return false;
+}
+
+static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 {
 	unsigned long addr, applied;
-	LIST_HEAD(page_list);
+	LIST_HEAD(folio_list);
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+		if (!folio)
+			continue;
 
-		if (!page)
+		if (damos_pa_filter_out(s, folio)) {
+			folio_put(folio);
 			continue;
+		}
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (isolate_lru_page(page)) {
-			put_page(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
+		if (!folio_isolate_lru(folio)) {
+			folio_put(folio);
 			continue;
 		}
-		if (PageUnevictable(page)) {
-			putback_lru_page(page);
+		if (folio_test_unevictable(folio)) {
+			folio_putback_lru(folio);
 		} else {
-			list_add(&page->lru, &page_list);
-			put_page(page);
+			list_add(&folio->lru, &folio_list);
+			folio_put(folio);
 		}
 	}
-	applied = reclaim_pages(&page_list);
+	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
 
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
-		struct damon_region *r, bool mark_accessed)
+		struct damon_region *r, struct damos *s, bool mark_accessed)
 {
 	unsigned long addr, applied = 0;
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+		if (!folio)
+			continue;
 
-		if (!page)
+		if (damos_pa_filter_out(s, folio)) {
+			folio_put(folio);
 			continue;
+		}
+
 		if (mark_accessed)
-			mark_page_accessed(page);
+			folio_mark_accessed(folio);
 		else
-			deactivate_page(page);
-		put_page(page);
-		applied++;
+			folio_deactivate(folio);
+		folio_put(folio);
+		applied += folio_nr_pages(folio);
 	}
 	return applied * PAGE_SIZE;
 }
 
-static unsigned long damon_pa_mark_accessed(struct damon_region *r)
+static unsigned long damon_pa_mark_accessed(struct damon_region *r,
+		struct damos *s)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, true);
+	return damon_pa_mark_accessed_or_deactivate(r, s, true);
 }
 
-static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
+		struct damos *s)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, false);
+	return damon_pa_mark_accessed_or_deactivate(r, s, false);
}
 
 static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
@@ -267,11 +306,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 {
 	switch (scheme->action) {
 	case DAMOS_PAGEOUT:
-		return damon_pa_pageout(r);
+		return damon_pa_pageout(r, scheme);
 	case DAMOS_LRU_PRIO:
-		return damon_pa_mark_accessed(r);
+		return damon_pa_mark_accessed(r, scheme);
 	case DAMOS_LRU_DEPRIO:
-		return damon_pa_deactivate_pages(r);
+		return damon_pa_deactivate_pages(r, scheme);
 	case DAMOS_STAT:
 		break;
 	default:
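The pivot of the new filter mechanism above is the line "return matched == filter->matching;": each filter can exclude either the folios that match its type (matching == true) or the ones that do not (matching == false), and damos_pa_filter_out() skips a folio as soon as any filter of the scheme excludes it. Below is a standalone user-space sketch of just that matching rule; it is not kernel code, the types are simplified stand-ins for DAMON's real struct damos_filter, and the memcg case is reduced to a plain id comparison without the RCU and NULL-memcg handling the patch performs.

/*
 * Standalone illustration of the DAMOS filter-out semantics; compile with
 * e.g. "cc -o filter filter.c". All names are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum damos_filter_type {
	DAMOS_FILTER_TYPE_ANON,
	DAMOS_FILTER_TYPE_MEMCG,
};

struct damos_filter {
	enum damos_filter_type type;
	bool matching;		/* exclude matches (true) or non-matches (false) */
	unsigned short memcg_id;	/* used by DAMOS_FILTER_TYPE_MEMCG */
};

/* Stand-in for the folio properties the two filter types inspect. */
struct folio_info {
	bool anon;
	unsigned short memcg_id;
};

static bool filter_out(const struct damos_filter *filter,
		       const struct folio_info *folio)
{
	bool matched = false;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_ANON:
		matched = folio->anon;
		break;
	case DAMOS_FILTER_TYPE_MEMCG:
		matched = filter->memcg_id == folio->memcg_id;
		break;
	}
	/* One filter type covers both "only act on" and "never act on". */
	return matched == filter->matching;
}

int main(void)
{
	/* A scheme filter that excludes anonymous memory from the action. */
	struct damos_filter anon_out = {
		.type = DAMOS_FILTER_TYPE_ANON,
		.matching = true,
	};
	struct folio_info anon = { .anon = true, .memcg_id = 1 };
	struct folio_info file = { .anon = false, .memcg_id = 1 };

	printf("anon folio filtered out: %d\n", filter_out(&anon_out, &anon)); /* 1 */
	printf("file folio filtered out: %d\n", filter_out(&anon_out, &file)); /* 0 */
	return 0;
}

Flipping .matching to false inverts the filter, so the same scheme would instead act only on anonymous folios.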