author | Peter Zijlstra <peterz@infradead.org> | 2018-05-28 15:24:20 +0200 |
---|---|---|
committer | Luis Claudio R. Goncalves <lgoncalv@redhat.com> | 2023-02-18 22:05:16 -0300 |
commit | ab7f011df3cdb50d52cd855db4ff60b609c18853 (patch) | |
tree | 60d3c6efa03f9bb20955e70be4cf52078995c300 /mm | |
parent | 6f03b832c4f958b347785b6466f940dacb832c47 (diff) | |
download | linux-rt-ab7f011df3cdb50d52cd855db4ff60b609c18853.tar.gz |
Split IRQ-off and zone->lock while freeing pages from PCP list #1
Split the IRQ-off section that covers the PCP list access from the
zone->lock section while freeing pages.
Introduce isolate_pcp_pages(), which moves the pages from the PCP
list onto a temporary list; the temporary list is then freed via
free_pcppages_bulk().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
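In outline, a drain now happens in two stages rather than inside one combined IRQ-off/zone->lock region: the pages are first detached from the per-CPU list with only interrupts disabled, and only afterwards handed to the buddy allocator under zone->lock (taken with spin_lock_irqsave() inside free_pcppages_bulk()). A minimal sketch of the resulting caller pattern, mirroring the drain_pages_zone() hunk in the diff below (flags, pcp and zone come from the surrounding function, as in the patch):

```c
	LIST_HEAD(dst);		/* temporary list holding the detached pages */
	int count;

	/* Stage 1: IRQs off; zone->lock is not taken here. */
	local_irq_save(flags);
	count = pcp->count;
	if (count)
		isolate_pcp_pages(count, pcp, &dst);

	local_irq_restore(flags);

	/* Stage 2: free to the buddy allocator; zone->lock is acquired
	 * inside free_pcppages_bulk(), independent of the IRQ-off section.
	 */
	if (count)
		free_pcppages_bulk(zone, count, &dst);
```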
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 81 |
1 file changed, 51 insertions, 30 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1fd41b91a1a8..bf59c26446b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1331,7 +1331,7 @@ static inline void prefetch_buddy(struct page *page)
 }
 
 /*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -1342,14 +1342,40 @@ static inline void prefetch_buddy(struct page *page)
  * pinned" detection logic.
  */
 static void free_pcppages_bulk(struct zone *zone, int count,
-					struct per_cpu_pages *pcp)
+					struct list_head *head)
+{
+	bool isolated_pageblocks;
+	struct page *page, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	isolated_pageblocks = has_isolate_pageblock(zone);
+
+	/*
+	 * Use safe version since after __free_one_page(),
+	 * page->lru.next will not point to original list.
+	 */
+	list_for_each_entry_safe(page, tmp, head, lru) {
+		int mt = get_pcppage_migratetype(page);
+		/* MIGRATE_ISOLATE page should not go to pcplists */
+		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+		/* Pageblock could have been isolated meanwhile */
+		if (unlikely(isolated_pageblocks))
+			mt = get_pageblock_migratetype(page);
+
+		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
+		trace_mm_page_pcpu_drain(page, 0, mt);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
+			      struct list_head *dst)
 {
 	int migratetype = 0;
 	int batch_free = 0;
 	int prefetch_nr = 0;
-	bool isolated_pageblocks;
-	struct page *page, *tmp;
-	LIST_HEAD(head);
+	struct page *page;
 
 	/*
 	 * Ensure proper count is passed which otherwise would stuck in the
@@ -1386,7 +1412,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (bulkfree_pcp_prepare(page))
 				continue;
 
-			list_add_tail(&page->lru, &head);
+			list_add_tail(&page->lru, dst);
 
 			/*
 			 * We are going to put the page back to the global
@@ -1401,26 +1427,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 				prefetch_buddy(page);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-
-	spin_lock(&zone->lock);
-	isolated_pageblocks = has_isolate_pageblock(zone);
-
-	/*
-	 * Use safe version since after __free_one_page(),
-	 * page->lru.next will not point to original list.
-	 */
-	list_for_each_entry_safe(page, tmp, &head, lru) {
-		int mt = get_pcppage_migratetype(page);
-		/* MIGRATE_ISOLATE page should not go to pcplists */
-		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-		/* Pageblock could have been isolated meanwhile */
-		if (unlikely(isolated_pageblocks))
-			mt = get_pageblock_migratetype(page);
-
-		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
-		trace_mm_page_pcpu_drain(page, 0, mt);
-	}
-	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -2938,13 +2944,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
 	unsigned long flags;
 	int to_drain, batch;
+	LIST_HEAD(dst);
 
 	local_irq_save(flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
-		free_pcppages_bulk(zone, to_drain, pcp);
+		isolate_pcp_pages(to_drain, pcp, &dst);
+
 	local_irq_restore(flags);
+
+	if (to_drain > 0)
+		free_pcppages_bulk(zone, to_drain, &dst);
 }
 #endif
 
@@ -2960,14 +2971,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	unsigned long flags;
 	struct per_cpu_pageset *pset;
 	struct per_cpu_pages *pcp;
+	LIST_HEAD(dst);
+	int count;
 
 	local_irq_save(flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
-	if (pcp->count)
-		free_pcppages_bulk(zone, pcp->count, pcp);
+	count = pcp->count;
+	if (count)
+		isolate_pcp_pages(count, pcp, &dst);
+
 	local_irq_restore(flags);
+
+	if (count)
+		free_pcppages_bulk(zone, count, &dst);
 }
 
 /*
@@ -3196,7 +3214,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
-		free_pcppages_bulk(zone, batch, pcp);
+		LIST_HEAD(dst);
+
+		isolate_pcp_pages(batch, pcp, &dst);
+		free_pcppages_bulk(zone, batch, &dst);
 	}
 }