author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-07-11 17:27:49 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-07-11 17:27:49 +0200
commit     eb3c3c3494cfc4f96da48b032740632250a35314
tree       7da23bf901e9125e0ac1535394053a42c49e0fa2 /patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
parent     72190d8535612c54d056dd5916b77995e93d0268
download   linux-rt-eb3c3c3494cfc4f96da48b032740632250a35314.tar.gz
[ANNOUNCE] v5.2-rt1 (tags: v5.2-rt1, v5.2-rt1-patches)
Dear RT folks!

I'm pleased to announce the v5.2-rt1 patch set.

Changes since v5.0.21-rt16:

  - Rebase to v5.2

First release in the v5.2 series. I had it in testing for the last few
days and did not notice anything unusual.

Known issues

  - rcutorture is currently broken on -RT. Reported by Juri Lelli.

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.2-rt1

The RT patch against v5.2 can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patch-5.2-rt1.patch.xz

The split quilt queue is available at:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch')
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index ddd757503689..b893199ac801 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1135,8 +1135,8 @@ static inline void prefetch_buddy(struct
+@@ -1192,8 +1192,8 @@ static inline void prefetch_buddy(struct
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool isolated_pageblocks;
struct page *page, *tmp;
-@@ -1151,12 +1151,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1208,12 +1208,27 @@ static void free_pcppages_bulk(struct zo
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2639,7 +2654,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2742,7 +2757,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2669,7 +2684,7 @@ static void drain_pages_zone(unsigned in
+@@ -2772,7 +2787,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2868,7 +2883,8 @@ static bool free_unref_page_prepare(stru
+@@ -2971,7 +2986,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -2897,10 +2913,8 @@ static void free_unref_page_commit(struc
+@@ -3000,10 +3016,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2911,13 +2925,17 @@ void free_unref_page(struct page *page)
+@@ -3014,13 +3028,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2928,6 +2946,11 @@ void free_unref_page_list(struct list_he
+@@ -3031,6 +3049,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -2940,10 +2963,12 @@ void free_unref_page_list(struct list_he
+@@ -3043,10 +3066,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -2956,6 +2981,21 @@ void free_unref_page_list(struct list_he
+@@ -3059,6 +3084,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
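
The refresh above only shifts the hunk offsets of the underlying patch so it
applies to v5.2 (e.g. 1135 -> 1192); the mechanism itself is unchanged and can
be read from the context lines: free_pcppages_bulk() now takes a list of
already-detached pages and acquires zone->lock itself, so callers collect
pages while interrupts are disabled and hand them to the buddy allocator only
after local_irq_restore(). Below is a condensed, kernel-style sketch of that
split, not the exact patch text: names and signatures are simplified, the
pageblock-isolation and batching details are omitted, and
free_unref_page_prepare()/free_unref_page_commit() are the existing helpers
from mm/page_alloc.c, with commit() assumed to take the extra destination
list the patch adds.

/*
 * Sketch of the pattern the patch introduces (simplified).
 *
 * Free every page on @head to the buddy allocator. Runs with IRQs
 * enabled; only zone->lock is held while the free lists are touched.
 */
static void free_pcppages_bulk(struct zone *zone, struct list_head *head)
{
	struct page *page, *tmp;

	spin_lock(&zone->lock);
	list_for_each_entry_safe(page, tmp, head, lru) {
		int mt = get_pcppage_migratetype(page);

		list_del(&page->lru);
		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
		trace_mm_page_pcpu_drain(page, 0, mt);
	}
	spin_unlock(&zone->lock);
}

void free_unref_page(struct page *page)
{
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);
	LIST_HEAD(dst);		/* pages to hand to the buddy allocator */

	if (!free_unref_page_prepare(page, pfn))
		return;

	/*
	 * IRQ-off section: only queue the page on the per-CPU list and,
	 * if pcp->high was crossed, move a batch of pages onto @dst.
	 */
	local_irq_save(flags);
	free_unref_page_commit(page, pfn, &dst);
	local_irq_restore(flags);

	/*
	 * The expensive part runs with IRQs enabled again; this keeps
	 * the IRQ-off section short and bounded, which matters on RT.
	 */
	if (!list_empty(&dst))
		free_pcppages_bulk(zone, &dst);
}

The drain_zone_pages() and drain_pages_zone() hunks follow the same shape:
detach and count under local_irq_save(), then call free_pcppages_bulk() on
the detached list once interrupts are back on.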