author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-09-07 12:43:49 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-09-07 12:43:49 +0200
commit     5beb0b705c1e1173ebf6e53d33f7f5e0045a2835 (patch)
tree       4d4c2c36ae35ed6445fab729cb266cf6decb416e /patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
parent     9985ec69e335f755bcb2e583344155fa6ef914bd (diff)
[ANNOUNCE] v5.14.1-rt19
Dear RT folks!
I'm pleased to announce the v5.14.1-rt19 patch set.
Changes since v5.14.1-rt18:
- Dan Carpenter reported a possible NULL pointer dereference in
rt_mutex_adjust_prio_chain(). Patch by Peter Zijlstra.
- The unused rt_rwlock_is_contended() has been removed. Reported by the
  kernel test robot.
- The "play idle" timer was missing a _HARD annotation. It would
freeze the system uppon activation of the intel powerclamp driver.
Reported by Thomas Gleixner.
- Vlastimil Babka's SLUB queue has been updated to v6r2.
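For context on the _HARD annotation mentioned above: on PREEMPT_RT, hrtimers
expire in softirq context unless they are explicitly marked for hard interrupt
expiry. A minimal sketch of that kind of fix follows; the timer and callback
names are illustrative, not the actual play-idle/powerclamp code:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer idle_wakeup_timer;	/* hypothetical name */

static enum hrtimer_restart idle_wakeup_fn(struct hrtimer *t)
{
	/* Expires in hard irq context because of the _HARD mode below. */
	return HRTIMER_NORESTART;
}

static void arm_idle_wakeup_timer(void)
{
	/*
	 * Plain HRTIMER_MODE_REL would defer expiry to softirq context
	 * on PREEMPT_RT; a timer that must reliably wake an idle-injection
	 * task needs HRTIMER_MODE_REL_HARD instead.
	 */
	hrtimer_init(&idle_wakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	idle_wakeup_timer.function = idle_wakeup_fn;
	hrtimer_start(&idle_wakeup_timer, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);
}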
Known issues
- netconsole triggers WARN.
- The "Memory controller" (CONFIG_MEMCG) has been disabled.
- An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
  still not clear if the RCU-related change is correct.
- Clark Williams reported issues in i915 (execlists_dequeue_irq()).
- Valentin Schneider reported a few splats on ARM64, see
  https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/
The delta patch against v5.14.1-rt18 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14.1-rt18-rt19.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14.1-rt19
The RT patch against v5.14.1 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14.1-rt19.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14.1-rt19.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch')
-rw-r--r--  patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch | 156
1 file changed, 156 insertions, 0 deletions
diff --git a/patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch b/patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
new file mode 100644
index 000000000000..7a01091b8456
--- /dev/null
+++ b/patches/0024-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
@@ -0,0 +1,156 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Thu, 20 May 2021 16:39:51 +0200
+Subject: [PATCH 24/33] mm, slub: separate detaching of partial list in
+ unfreeze_partials() from unfreezing
+
+Unfreezing partial list can be split to two phases - detaching the list from
+struct kmem_cache_cpu, and processing the list. The whole operation does not
+need to be protected by disabled irqs. Restructure the code to separate the
+detaching (with disabled irqs) and unfreezing (with irq disabling to be reduced
+in the next patch).
+
+Also, unfreeze_partials() can be called from another cpu on behalf of a cpu
+that is being offlined, where disabling irqs on the local cpu has no sense, so
+restructure the code as follows:
+
+- __unfreeze_partials() is the bulk of unfreeze_partials() that processes the
+  detached percpu partial list
+- unfreeze_partials() detaches list from current cpu with irqs disabled and
+  calls __unfreeze_partials()
+- unfreeze_partials_cpu() is to be called for the offlined cpu so it needs no
+  irq disabling, and is called from __flush_cpu_slab()
+- flush_cpu_slab() is for the local cpu thus it needs to call
+  unfreeze_partials(). So it can't simply call
+  __flush_cpu_slab(smp_processor_id()) anymore and we have to open-code the
+  proper calls.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/slub.c | 73 +++++++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 51 insertions(+), 22 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2347,25 +2347,15 @@ static void deactivate_slab(struct kmem_
+ 	}
+ }
+
+-/*
+- * Unfreeze all the cpu partial slabs.
+- *
+- * This function must be called with preemption or migration
+- * disabled with c local to the cpu.
+- */
+-static void unfreeze_partials(struct kmem_cache *s,
+-		struct kmem_cache_cpu *c)
+-{
+ #ifdef CONFIG_SLUB_CPU_PARTIAL
++static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
++{
+ 	struct kmem_cache_node *n = NULL, *n2 = NULL;
+-	struct page *page, *partial_page, *discard_page = NULL;
++	struct page *page, *discard_page = NULL;
+ 	unsigned long flags;
+
+ 	local_irq_save(flags);
+
+-	partial_page = slub_percpu_partial(c);
+-	c->partial = NULL;
+-
+ 	while (partial_page) {
+ 		struct page new;
+ 		struct page old;
+@@ -2420,10 +2410,45 @@ static void unfreeze_partials(struct kme
+ 		discard_slab(s, page);
+ 		stat(s, FREE_SLAB);
+ 	}
++}
+
+-#endif	/* CONFIG_SLUB_CPU_PARTIAL */
++/*
++ * Unfreeze all the cpu partial slabs.
++ */
++static void unfreeze_partials(struct kmem_cache *s)
++{
++	struct page *partial_page;
++	unsigned long flags;
++
++	local_irq_save(flags);
++	partial_page = this_cpu_read(s->cpu_slab->partial);
++	this_cpu_write(s->cpu_slab->partial, NULL);
++	local_irq_restore(flags);
++
++	if (partial_page)
++		__unfreeze_partials(s, partial_page);
++}
++
++static void unfreeze_partials_cpu(struct kmem_cache *s,
++				  struct kmem_cache_cpu *c)
++{
++	struct page *partial_page;
++
++	partial_page = slub_percpu_partial(c);
++	c->partial = NULL;
++
++	if (partial_page)
++		__unfreeze_partials(s, partial_page);
+ }
+
++#else	/* CONFIG_SLUB_CPU_PARTIAL */
++
++static inline void unfreeze_partials(struct kmem_cache *s) { }
++static inline void unfreeze_partials_cpu(struct kmem_cache *s,
++				  struct kmem_cache_cpu *c) { }
++
++#endif	/* CONFIG_SLUB_CPU_PARTIAL */
++
+ /*
+  * Put a page that was just frozen (in __slab_free|get_partial_node) into a
+  * partial page slot if available.
+@@ -2452,7 +2477,7 @@ static void put_cpu_partial(struct kmem_
+ 			 * partial array is full. Move the existing
+ 			 * set to the per node partial list.
+ 			 */
+-			unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++			unfreeze_partials(s);
+ 			oldpage = NULL;
+ 			pobjects = 0;
+ 			pages = 0;
+@@ -2487,11 +2512,6 @@ static inline void flush_slab(struct kme
+ 	stat(s, CPUSLAB_FLUSH);
+ }
+
+-/*
+- * Flush cpu slab.
+- *
+- * Called from IPI handler with interrupts disabled.
+- */
+ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+ {
+ 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+@@ -2499,14 +2519,23 @@ static inline void __flush_cpu_slab(stru
+ 	if (c->page)
+ 		flush_slab(s, c);
+
+-	unfreeze_partials(s, c);
++	unfreeze_partials_cpu(s, c);
+ }
+
++/*
++ * Flush cpu slab.
++ *
++ * Called from IPI handler with interrupts disabled.
++ */
+ static void flush_cpu_slab(void *d)
+ {
+	struct kmem_cache *s = d;
++	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
++
++	if (c->page)
++		flush_slab(s, c);
+
+-	__flush_cpu_slab(s, smp_processor_id());
++	unfreeze_partials(s);
+ }
+
+ static bool has_cpu_slab(int cpu, void *info)
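The patch above is an instance of a general pattern: detach a whole shared
list inside a short critical section, then process the detached list with the
lock (here: disabled irqs) no longer held. A minimal userspace C analogue of
that pattern, with a pthread mutex standing in for irq disabling and all names
being illustrative rather than taken from the kernel:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

/* Stand-ins for c->partial and the irq-disabled region. */
static struct node *partial_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 2: process an already-detached list; no lock is held. */
static void process_detached(struct node *head)
{
	while (head) {
		struct node *next = head->next;

		printf("processing %d\n", head->value);
		free(head);
		head = next;
	}
}

/* Phase 1: detach under the lock, keeping the critical section minimal. */
static void unfreeze_all(void)
{
	struct node *head;

	pthread_mutex_lock(&list_lock);
	head = partial_list;
	partial_list = NULL;
	pthread_mutex_unlock(&list_lock);

	if (head)
		process_detached(head);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->value = i;
		pthread_mutex_lock(&list_lock);
		n->next = partial_list;
		partial_list = n;
		pthread_mutex_unlock(&list_lock);
	}
	unfreeze_all();
	return 0;
}

Build with "cc -pthread". The point of the split mirrors the changelog: only
the pointer swap needs the lock, so the potentially long list walk no longer
extends the irqs-off (or locked) region.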