Diffstat (limited to 'patches/mm_page_alloc__Batch_the_accounting_updates_in_the_bulk_allocator.patch')
-rw-r--r--  patches/mm_page_alloc__Batch_the_accounting_updates_in_the_bulk_allocator.patch  127
1 file changed, 127 insertions, 0 deletions
diff --git a/patches/mm_page_alloc__Batch_the_accounting_updates_in_the_bulk_allocator.patch b/patches/mm_page_alloc__Batch_the_accounting_updates_in_the_bulk_allocator.patch
new file mode 100644
index 000000000000..a3810ee418c3
--- /dev/null
+++ b/patches/mm_page_alloc__Batch_the_accounting_updates_in_the_bulk_allocator.patch
@@ -0,0 +1,127 @@
+Subject: mm/page_alloc: Batch the accounting updates in the bulk allocator
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Wed May 12 10:54:54 2021 +0100
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+Now that the zone_statistics are simple counters that do not require
+special protection, the bulk allocator accounting updates can be batch
+updated without adding too much complexity with protected RMW updates or
+using xchg.
+
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ include/linux/vmstat.h |    8 ++++++++
+ mm/page_alloc.c        |   30 +++++++++++++-----------------
+ 2 files changed, 21 insertions(+), 17 deletions(-)
+---
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index fe32a2210e73..d6a6cf53b127 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -247,6 +247,14 @@ __count_numa_event(struct zone *zone, enum numa_stat_item item)
+ 	raw_cpu_inc(pzstats->vm_numa_event[item]);
+ }
+ 
++static inline void
++__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
++{
++	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
++
++	raw_cpu_add(pzstats->vm_numa_event[item], delta);
++}
++
+ extern unsigned long sum_zone_node_page_state(int node,
+ 				 enum zone_stat_item item);
+ extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 72984bb523e3..edf6c9a2fe79 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3456,7 +3456,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
+  *
+  * Must be called with interrupts disabled.
+  */
+-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
++static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
++				   long nr_account)
+ {
+ #ifdef CONFIG_NUMA
+ 	enum numa_stat_item local_stat = NUMA_LOCAL;
+@@ -3469,12 +3470,12 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
+ 		local_stat = NUMA_OTHER;
+ 
+ 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
+-		__count_numa_event(z, NUMA_HIT);
++		__count_numa_events(z, NUMA_HIT, nr_account);
+ 	else {
+-		__count_numa_event(z, NUMA_MISS);
+-		__count_numa_event(preferred_zone, NUMA_FOREIGN);
++		__count_numa_events(z, NUMA_MISS, nr_account);
++		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
+ 	}
+-	__count_numa_event(z, local_stat);
++	__count_numa_events(z, local_stat, nr_account);
+ #endif
+ }
+ 
+@@ -3520,7 +3521,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+ 	if (page) {
+ 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
+-		zone_statistics(preferred_zone, zone);
++		zone_statistics(preferred_zone, zone, 1);
+ 	}
+ 	local_unlock_irqrestore(&pagesets.lock, flags);
+ 	return page;
+@@ -3581,7 +3582,7 @@ struct page *rmqueue(struct zone *preferred_zone,
+ 			get_pcppage_migratetype(page));
+ 
+ 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+-	zone_statistics(preferred_zone, zone);
++	zone_statistics(preferred_zone, zone, 1);
+ 	local_irq_restore(flags);
+ 
+ out:
+@@ -5063,7 +5064,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 	struct alloc_context ac;
+ 	gfp_t alloc_gfp;
+ 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
+-	int nr_populated = 0;
++	int nr_populated = 0, nr_account = 0;
+ 
+ 	if (unlikely(nr_pages <= 0))
+ 		return 0;
+@@ -5140,15 +5141,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 				goto failed_irq;
+ 			break;
+ 		}
+-
+-		/*
+-		 * Ideally this would be batched but the best way to do
+-		 * that cheaply is to first convert zone_statistics to
+-		 * be inaccurate per-cpu counter like vm_events to avoid
+-		 * a RMW cycle then do the accounting with IRQs enabled.
+-		 */
+-		__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
+-		zone_statistics(ac.preferred_zoneref->zone, zone);
++		nr_account++;
+ 
+ 		prep_new_page(page, 0, gfp, 0);
+ 		if (page_list)
+@@ -5158,6 +5151,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 		nr_populated++;
+ 	}
+ 
++	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
++	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
++
+ 	local_unlock_irqrestore(&pagesets.lock, flags);
+ 
+ 	return nr_populated;
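
The core of the patch is the batching pattern in __alloc_pages_bulk(): rather than updating the PGALLOC and NUMA counters once per allocated page inside the loop, the loop only increments a local nr_account, and a single counter update is applied for the whole batch afterwards. Below is a minimal standalone userspace sketch of that pattern, not kernel code; the names fake_alloc_page(), count_numa_events(), and alloc_pages_bulk() here are illustrative stand-ins for the real kernel helpers, and the plain global counter stands in for a per-CPU vmstat counter.

/* batching sketch: one counter update per batch, not per page */
#include <stdio.h>

static long vm_numa_hit;	/* stand-in for a per-CPU NUMA_HIT counter */

/* stand-in for raw_cpu_add(): one read-modify-write per batch */
static void count_numa_events(long delta)
{
	vm_numa_hit += delta;
}

/* stand-in for the per-page allocation; pretend it always succeeds */
static int fake_alloc_page(void)
{
	return 1;
}

static unsigned long alloc_pages_bulk(int nr_pages)
{
	int nr_populated = 0, nr_account = 0;

	for (int i = 0; i < nr_pages; i++) {
		if (!fake_alloc_page())
			break;
		nr_account++;	/* batched: no counter RMW inside the loop */
		nr_populated++;
	}

	/* single accounting update covering the whole batch */
	count_numa_events(nr_account);

	return nr_populated;
}

int main(void)
{
	unsigned long got = alloc_pages_bulk(128);

	printf("allocated %lu pages, NUMA_HIT now %ld\n", got, vm_numa_hit);
	return 0;
}

The design point, per the commit message, is that once zone_statistics became simple per-CPU counters that tolerate inaccuracy (like vm_events), the per-page read-modify-write in the hot loop could be replaced by one raw_cpu_add()-style update per call, with no xchg or otherwise protected RMW required.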