author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2020-02-04 17:07:39 +0100
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2020-02-04 17:07:39 +0100
commit		9cf4d2227a896a0aaf753dd8875167e0ab391fbc (patch)
tree		9dc0196a476be638e738057ea3cd278e9e02e545 /patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
parent		0d9814c65b67abf558330ed09846d06b797d1840 (diff)
download	linux-rt-9cf4d2227a896a0aaf753dd8875167e0ab391fbc.tar.gz
[ANNOUNCE] v5.4.17-rt9
Dear RT folks!
I'm pleased to announce the v5.4.17-rt9 patch set.
Changes since v5.4.17-rt8:
- A rework of percpu-rwsem locking. The fs core was using a
percpu-rwsem and returned to userland with the lock acquired during
`fsfreeze', which led to warnings. On !RT the warnings were disabled,
but the same lockdep trick did not work on RT.
Reported by Juri Lelli, patch(es) by Peter Zijlstra. (A user-side
sketch of the pattern follows this list.)
- Include a header file for the `current' macro so an allmodconfig
build on ARM does not break. (A sketch follows this list.)
- A tweak to migrate_enable() so it no longer has to wait until
stop_one_cpu_nowait() finishes in case the CPU mask changed during
migrate_disable() and the task has to move to another CPU. Patch by
Scott Wood. (A sketch follows this list.)
- Drop a lock earlier in mm/memcontrol. Not a bug fix, but there is no
need for the extra locked section. Patch by Matt Fleming.
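To illustrate the percpu-rwsem item, a minimal user-side sketch of the
`fsfreeze' pattern in question. This is illustrative only and not part
of the patch set; FIFREEZE/FITHAW are the real ioctls used by
fsfreeze(8), the rest is assumed boilerplate. The kernel takes the
superblock's percpu-rwsem on FIFREEZE and returns to userland with it
still held until FITHAW, which is the pattern that upset lockdep on RT:

	/* Sketch: freeze and thaw a filesystem. The FIFREEZE ioctl
	 * returns to userland while the kernel still holds the
	 * superblock's percpu-rwsem; FITHAW releases it again. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FIFREEZE, 0) < 0)		/* lock taken, held past the return */
			perror("FIFREEZE");
		else if (ioctl(fd, FITHAW, 0) < 0)	/* lock dropped again on thaw */
			perror("FITHAW");
		return 0;
	}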
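To illustrate the `current' item, a sketch of the kind of code that
breaks when the defining header is missing (the function is invented,
the includes are the point). `current' is a macro provided via
asm/current.h and normally pulled in through <linux/sched.h>; a file
using it without such an include can fail in an allmodconfig build on
an architecture where no other header supplies it:

	#include <linux/sched.h>	/* struct task_struct and the `current' macro */
	#include <linux/printk.h>

	static void report_caller(void)
	{
		/* Without the sched.h include above, `current' would be
		 * undeclared here and the build would fail. */
		pr_info("called from pid %d (%s)\n",
			current->pid, current->comm);
	}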
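To illustrate the migrate_enable() item, a sketch of the
migrate_disable()/migrate_enable() pattern the tweak affects. This is
kernel-side and RT-specific; the function names are real, the
surrounding function is invented for illustration:

	#include <linux/preempt.h>	/* migrate_disable()/migrate_enable() */
	#include <linux/smp.h>		/* smp_processor_id() */
	#include <linux/printk.h>

	static void per_cpu_section(void)
	{
		migrate_disable();	/* the task stays on this CPU from here on */
		pr_info("pinned on CPU %d\n", smp_processor_id());
		/* If another task changed our CPU mask while migration was
		 * disabled, migrate_enable() kicks the move via
		 * stop_one_cpu_nowait(); with this release it no longer
		 * waits for that to finish before returning. */
		migrate_enable();
	}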
Known issues
- None
The delta patch against v5.4.17-rt8 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/incr/patch-5.4.17-rt8-rt9.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.4.17-rt9
The RT patch against v5.4.17 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patch-5.4.17-rt9.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.17-rt9.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch')
-rw-r--r--	patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch	20
1 files changed, 10 insertions, 10 deletions
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 7da1092ba766..cb3958983d96 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -1253,8 +1253,8 @@ static inline void prefetch_buddy(struct
+@@ -1246,8 +1246,8 @@ static inline void prefetch_buddy(struct
   * And clear the zone's pages_scanned counter, to hold off the "all pages are
   * pinned" detection logic.
   */
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	bool isolated_pageblocks;
  	struct page *page, *tmp;
-@@ -1269,12 +1269,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1262,12 +1262,27 @@ static void free_pcppages_bulk(struct zo
  	 */
  	list_for_each_entry_safe(page, tmp, head, lru) {
  		int mt = get_pcppage_migratetype(page);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
  		trace_mm_page_pcpu_drain(page, 0, mt);
  	}
-@@ -2817,7 +2832,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2806,7 +2821,7 @@ void drain_zone_pages(struct zone *zone,
  	local_irq_restore(flags);
 
  	if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
  #endif
 
-@@ -2847,7 +2862,7 @@ static void drain_pages_zone(unsigned in
+@@ -2836,7 +2851,7 @@ static void drain_pages_zone(unsigned in
  	local_irq_restore(flags);
 
  	if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  /*
-@@ -3046,7 +3061,8 @@ static bool free_unref_page_prepare(stru
+@@ -3035,7 +3050,8 @@ static bool free_unref_page_prepare(stru
  	return true;
  }
 
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  {
  	struct zone *zone = page_zone(page);
  	struct per_cpu_pages *pcp;
-@@ -3075,10 +3091,8 @@ static void free_unref_page_commit(struc
+@@ -3064,10 +3080,8 @@ static void free_unref_page_commit(struc
  	pcp->count++;
  	if (pcp->count >= pcp->high) {
  		unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
  	}
  }
-@@ -3089,13 +3103,17 @@ void free_unref_page(struct page *page)
+@@ -3078,13 +3092,17 @@ void free_unref_page(struct page *page)
  {
  	unsigned long flags;
  	unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  }
 
  /*
-@@ -3106,6 +3124,11 @@ void free_unref_page_list(struct list_he
+@@ -3095,6 +3113,11 @@ void free_unref_page_list(struct list_he
  	struct page *page, *next;
  	unsigned long flags, pfn;
  	int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
  	/* Prepare pages for freeing */
  	list_for_each_entry_safe(page, next, list, lru) {
-@@ -3118,10 +3141,12 @@ void free_unref_page_list(struct list_he
+@@ -3107,10 +3130,12 @@ void free_unref_page_list(struct list_he
  	local_irq_save(flags);
  	list_for_each_entry_safe(page, next, list, lru) {
  		unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
  		/*
  		 * Guard against excessive IRQ disabled times when we get
-@@ -3134,6 +3159,21 @@ void free_unref_page_list(struct list_he
+@@ -3123,6 +3148,21 @@ void free_unref_page_list(struct list_he
  		}
  	}
  	local_irq_restore(flags);