author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2017-10-17 17:13:28 +0200
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2017-10-17 17:13:28 +0200
commit		4aca7bf0252fb2fd2017ac18d3997776162a99c0 (patch)
tree		ce0b44cfbff890448774f7878e962870f354bd7c /patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
parent		163c99cd5cc3ffff6f0ac886236223e72ae989fb (diff)
download	linux-rt-4aca7bf0252fb2fd2017ac18d3997776162a99c0.tar.gz
[ANNOUNCE] v4.13.7-rt1 (tags: v4.13.7-rt1, v4.13.7-rt1-patches)
Dear RT folks!

I'm pleased to announce the v4.13.7-rt1 patch set.

Changes since v4.11.12-rt16:

- Rebase to v4.13.7

- We now have only the reader-biased version of RWLOCK. In v4.11 it was
  possible to choose between both implementations, but since the
  reader-biased version causes no problems, it is now the only
  implementation.

- The lockdep self-test is now disabled. While it produced some false
  positives on v4.11, it now completely locks up the system and needs
  investigation before it can be re-enabled.

Known issues
    None

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.13.7-rt1

The RT patch against v4.13.7 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.13/older/patch-4.13.7-rt1.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.13/older/patches-4.13.7-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
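For context: the patch being refreshed below converts the page allocator's
per-CPU page lists from plain local_irq_save()/local_irq_restore() to an RT
local lock, so the critical sections become sleepable on PREEMPT_RT while
non-RT builds keep their old behaviour. A minimal sketch of that pattern,
assuming the locallock.h API carried in this patch queue
(DEFINE_LOCAL_IRQ_LOCK, local_lock_irqsave, local_unlock_irqrestore); the
surrounding function is simplified, not the patch's verbatim code:

	/* Sketch of the RT local-lock pattern applied in mm/page_alloc.c.
	 * On !PREEMPT_RT_BASE, local_lock_irqsave() falls back to plain
	 * local_irq_save(), so non-RT behaviour is unchanged.
	 */
	#include <linux/locallock.h>

	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);	/* protects the per-CPU pagesets */

	static void pcp_critical_section(struct zone *zone)
	{
		unsigned long flags;

		/* Was: local_irq_save(flags) -- a non-preemptible section on RT.
		 * Now: a per-CPU lock that a preempted holder can sleep under. */
		local_lock_irqsave(pa_lock, flags);
		/* ... operate on this_cpu_ptr(zone->pageset)->pcp safely ... */
		local_unlock_irqrestore(pa_lock, flags);
	}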
Diffstat (limited to 'patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch')
-rw-r--r--	patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch	32
1 file changed, 16 insertions, 16 deletions
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 2e38cdb16267..db4654d4e0a7 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1258,10 +1271,10 @@ static void __free_pages_ok(struct page
+@@ -1249,10 +1262,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2288,14 +2301,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2372,14 +2385,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2312,7 +2325,7 @@ static void drain_pages_zone(unsigned in
+@@ -2396,7 +2409,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2320,7 +2333,7 @@ static void drain_pages_zone(unsigned in
+@@ -2404,7 +2417,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2355,6 +2368,7 @@ void drain_local_pages(struct zone *zone
+@@ -2439,6 +2452,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
/*
-@@ -2368,6 +2382,7 @@ static void drain_local_pages_wq(struct
+@@ -2452,6 +2466,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(NULL);
preempt_enable();
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2438,7 +2453,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2522,7 +2537,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
-@@ -2446,6 +2468,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2530,6 +2552,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2507,7 +2530,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2606,7 +2629,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2538,7 +2561,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2637,7 +2660,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2695,7 +2718,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -2794,7 +2817,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
-@@ -2703,7 +2726,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -2802,7 +2825,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -2730,7 +2753,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -2829,7 +2852,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -2750,14 +2773,14 @@ struct page *rmqueue(struct zone *prefer
+@@ -2849,14 +2872,14 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -7591,7 +7614,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7754,7 +7777,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7600,7 +7623,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7763,7 +7786,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
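All sixteen changed lines above are inner hunk headers: refreshing the quilt
patch against v4.13.7 only renumbers where each hunk applies in
mm/page_alloc.c (e.g. @@ -1258,10 +1271,10 @@ becomes @@ -1249,10 +1262,10 @@,
meaning the same 10-line hunk now starts at a different line); the hunk
payloads themselves are untouched. The drain_zone_pages() and zone_pcp_reset()
hunks also rely on being able to take the per-CPU pageset lock for a remote
CPU. A sketch of the fallback macros this patch is built around -- hedged,
since the full patch body is not shown on this page:

	/* Sketch (not the verbatim patch): on RT, another CPU's pageset can be
	 * locked remotely; without RT this collapses to IRQ disabling.
	 * local_lock_irqsave_on() is the per-CPU-remote variant from the
	 * locallock.h carried in this patch queue.
	 */
	#ifdef CONFIG_PREEMPT_RT_BASE
	# define cpu_lock_irqsave(cpu, flags)		\
		local_lock_irqsave_on(pa_lock, flags, cpu)
	# define cpu_unlock_irqrestore(cpu, flags)	\
		local_unlock_irqrestore_on(pa_lock, flags, cpu)
	#else
	# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
	# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
	#endif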