Diffstat (limited to 'patches/mm_slub__use_migrate_disable_on_PREEMPT_RT.patch')
 patches/mm_slub__use_migrate_disable_on_PREEMPT_RT.patch | 137 ++++++++++++++++
 1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/patches/mm_slub__use_migrate_disable_on_PREEMPT_RT.patch b/patches/mm_slub__use_migrate_disable_on_PREEMPT_RT.patch
new file mode 100644
index 000000000000..0787a5e5ddf6
--- /dev/null
+++ b/patches/mm_slub__use_migrate_disable_on_PREEMPT_RT.patch
@@ -0,0 +1,137 @@
+Subject: mm, slub: use migrate_disable() on PREEMPT_RT
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Fri May 21 14:03:23 2021 +0200
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+We currently use preempt_disable() (directly or via get_cpu_ptr()) to stabilize
+the pointer to kmem_cache_cpu. On PREEMPT_RT this would be incompatible with
+the list_lock spinlock. We can use migrate_disable() instead, but that
+increases overhead on !PREEMPT_RT as it's an unconditional function call even
+though it's ultimately a preempt_disable() there.
+
+In order to get the best available mechanism on both PREEMPT_RT and
+!PREEMPT_RT, introduce private slub_get_cpu_ptr() and slub_put_cpu_ptr()
+wrappers and use them.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ mm/slub.c | 41 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 31 insertions(+), 10 deletions(-)
+---
+diff --git a/mm/slub.c b/mm/slub.c
+index c4cad4e05c21..44efc5916e32 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -116,6 +116,26 @@
+  * the fast path and disables lockless freelists.
+  */
+ 
++/*
++ * We could simply use migrate_disable()/enable() but as long as it's a
++ * function call even on !PREEMPT_RT, use inline preempt_disable() there.
++ */
++#ifndef CONFIG_PREEMPT_RT
++#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
++#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
++#else
++#define slub_get_cpu_ptr(var)		\
++({					\
++	migrate_disable();		\
++	this_cpu_ptr(var);		\
++})
++#define slub_put_cpu_ptr(var)		\
++do {					\
++	(void)(var);			\
++	migrate_enable();		\
++} while (0)
++#endif
++
+ #ifdef CONFIG_SLUB_DEBUG
+ #ifdef CONFIG_SLUB_DEBUG_ON
+ DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
+@@ -2420,7 +2440,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ 	int pages;
+ 	int pobjects;
+ 
+-	preempt_disable();
++	slub_get_cpu_ptr(s->cpu_slab);
+ 	do {
+ 		pages = 0;
+ 		pobjects = 0;
+@@ -2451,7 +2471,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ 
+ 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+ 								!= oldpage);
+-	preempt_enable();
++	slub_put_cpu_ptr(s->cpu_slab);
+ #endif	/* CONFIG_SLUB_CPU_PARTIAL */
+ }
+ 
+@@ -2760,7 +2780,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+ 		goto deactivate_slab;
+ 
+-	/* must check again c->page in case IRQ handler changed it */
++	/* must check again c->page in case we got preempted and it changed */
+ 	local_irq_save(flags);
+ 	if (unlikely(page != c->page)) {
+ 		local_irq_restore(flags);
+@@ -2819,7 +2839,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 	}
+ 	if (unlikely(!slub_percpu_partial(c))) {
+ 		local_irq_restore(flags);
+-		goto new_objects; /* stolen by an IRQ handler */
++		/* we were preempted and partial list got empty */
++		goto new_objects;
+ 	}
+ 
+ 	page = c->page = slub_percpu_partial(c);
+@@ -2835,9 +2856,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 	if (freelist)
+ 		goto check_new_page;
+ 
+-	put_cpu_ptr(s->cpu_slab);
++	slub_put_cpu_ptr(s->cpu_slab);
+ 	page = new_slab(s, gfpflags, node);
+-	c = get_cpu_ptr(s->cpu_slab);
++	c = slub_get_cpu_ptr(s->cpu_slab);
+ 
+ 	if (unlikely(!page)) {
+ 		slab_out_of_memory(s, gfpflags, node);
+@@ -2920,12 +2941,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ 	 * cpu before disabling preemption. Need to reload cpu area
+ 	 * pointer.
+ 	 */
+-	c = get_cpu_ptr(s->cpu_slab);
++	c = slub_get_cpu_ptr(s->cpu_slab);
+ #endif
+ 
+ 	p = ___slab_alloc(s, gfpflags, node, addr, c);
+ #ifdef CONFIG_PREEMPT_COUNT
+-	put_cpu_ptr(s->cpu_slab);
++	slub_put_cpu_ptr(s->cpu_slab);
+ #endif
+ 	return p;
+ }
+@@ -3446,7 +3467,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ 	 * IRQs, which protects against PREEMPT and interrupts
+ 	 * handlers invoking normal fastpath.
+ 	 */
+-	c = get_cpu_ptr(s->cpu_slab);
++	c = slub_get_cpu_ptr(s->cpu_slab);
+ 	local_irq_disable();
+ 
+ 	for (i = 0; i < size; i++) {
+@@ -3492,7 +3513,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ 	}
+ 	c->tid = next_tid(c->tid);
+ 	local_irq_enable();
+-	put_cpu_ptr(s->cpu_slab);
++	slub_put_cpu_ptr(s->cpu_slab);
+ 
+ 	/*
+ 	 * memcg and kmem_cache debug support and memory initialization.
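
For readers who want to try the wrapper pattern outside the kernel, here is a minimal,
self-contained C sketch of the conditional slub_get_cpu_ptr()/slub_put_cpu_ptr() scheme
the patch introduces. The printing stubs, the fake this_cpu_ptr(), and the userspace
main() are assumptions made purely so the sketch compiles and runs (gcc or clang; it
relies on GNU statement expressions, as the kernel does); in mm/slub.c these are the
real scheduler and percpu operations.

/*
 * Illustrative sketch only, not kernel code. Build with or without
 * -DCONFIG_PREEMPT_RT to see which primitive each variant ends up using.
 */
#include <stdio.h>

/* Stubs standing in for the real kernel primitives (assumptions). */
void preempt_disable(void) { puts("preempt_disable()"); }
void preempt_enable(void)  { puts("preempt_enable()"); }
void migrate_disable(void) { puts("migrate_disable()"); }
void migrate_enable(void)  { puts("migrate_enable()"); }

static int cpu_var;			/* stand-in for a per-CPU variable */
#define this_cpu_ptr(var)	(var)	/* the real one resolves a per-CPU address */
#define get_cpu_ptr(var)	({ preempt_disable(); this_cpu_ptr(var); })
#define put_cpu_ptr(var)	do { (void)(var); preempt_enable(); } while (0)

#ifndef CONFIG_PREEMPT_RT
/* !PREEMPT_RT: keep the cheap inline preempt_disable() path. */
#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
#else
/*
 * PREEMPT_RT: migrate_disable() pins the task to its current CPU, so
 * the returned pointer stays stable, yet the task stays preemptible.
 */
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#endif

int main(void)
{
	int *c = slub_get_cpu_ptr(&cpu_var);

	*c = 42;	/* the pointer is stable between get and put */
	printf("value = %d\n", *c);
	slub_put_cpu_ptr(&cpu_var);
	return 0;
}

Either way the caller gets a stable pointer between get and put; the difference is
that on PREEMPT_RT the section remains preemptible, which is what keeps it compatible
with list_lock being a sleeping spinlock there, while !PREEMPT_RT avoids the
unconditional migrate_disable() function call.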