Diffstat (limited to 'patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch')
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch  34
1 file changed, 17 insertions, 17 deletions
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 91bf513c25f3..612dafd9e58e 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1268,10 +1281,10 @@ static void __free_pages_ok(struct page
+@@ -1296,10 +1309,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2539,13 +2552,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2560,13 +2573,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2567,7 +2580,7 @@ static void drain_pages_zone(unsigned in
+@@ -2588,7 +2601,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2575,7 +2588,7 @@ static void drain_pages_zone(unsigned in
+@@ -2596,7 +2609,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -2613,6 +2626,7 @@ void drain_local_pages(struct zone *zone
+@@ -2634,6 +2647,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
/*
-@@ -2626,6 +2640,7 @@ static void drain_local_pages_wq(struct
+@@ -2647,6 +2661,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(NULL);
preempt_enable();
}
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2692,7 +2707,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2713,7 +2728,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
-@@ -2700,6 +2722,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2721,6 +2743,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2819,9 +2842,9 @@ void free_unref_page(struct page *page)
+@@ -2840,9 +2863,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -2848,7 +2871,7 @@ void free_unref_page_list(struct list_he
+@@ -2869,7 +2892,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -2863,12 +2886,12 @@ void free_unref_page_list(struct list_he
+@@ -2884,12 +2907,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3017,7 +3040,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3038,7 +3061,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, pcp, list);
-@@ -3025,7 +3048,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3046,7 +3069,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3052,7 +3075,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3073,7 +3096,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3072,14 +3095,14 @@ struct page *rmqueue(struct zone *prefer
+@@ -3093,14 +3116,14 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8085,7 +8108,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8094,7 +8117,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8094,7 +8117,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8103,7 +8126,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
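
Note (not part of the diff above): the refreshed hunk headers all belong to the same RT conversion in mm/page_alloc.c, which replaces raw local_irq_save()/local_irq_restore() around the per-cpu pageset operations with a named local lock so that PREEMPT_RT can substitute a sleeping per-CPU lock. A minimal sketch of that pattern, assuming the pa_lock name and the <linux/locallock.h> API used by the RT series of this era (neither is visible in the truncated context above):

	#include <linux/locallock.h>

	/* Per-CPU lock guarding this CPU's pageset lists (RT series naming). */
	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	/* Illustrative helper, not a literal excerpt from the patch. */
	static void pcp_section(void)
	{
		unsigned long flags;

		/* Was: local_irq_save(flags); */
		local_lock_irqsave(pa_lock, flags);

		/* ... operate on this CPU's per-cpu page lists ... */

		/* Was: local_irq_restore(flags); */
		local_unlock_irqrestore(pa_lock, flags);
	}

On a non-RT kernel, DEFINE_LOCAL_IRQ_LOCK and local_lock_irqsave() degrade to plain IRQ disabling, so the converted sections behave exactly as before; only with PREEMPT_RT enabled does the lock become a per-CPU sleeping lock, which is why the hunk offsets are the only thing that needs refreshing when the patch is rebased onto a newer kernel.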