Diffstat (limited to 'patches/0013-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch')
-rw-r--r--   patches/0013-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch   96
1 file changed, 96 insertions, 0 deletions
diff --git a/patches/0013-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch b/patches/0013-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch
new file mode 100644
index 000000000000..ef3f9b15596c
--- /dev/null
+++ b/patches/0013-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch
@@ -0,0 +1,96 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 10 May 2021 13:56:17 +0200
+Subject: [PATCH 13/33] mm, slub: move disabling irqs closer to get_partial()
+ in ___slab_alloc()
+
+Continue reducing the irq disabled scope. Check for per-cpu partial slabs
+first with irqs enabled, then recheck with irqs disabled before grabbing
+the slab page. Mostly preparatory for the following patches.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/slub.c | 34 +++++++++++++++++++++++++---------
+ 1 file changed, 25 insertions(+), 9 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2706,11 +2706,6 @@ static void *___slab_alloc(struct kmem_c
+ if (unlikely(node != NUMA_NO_NODE &&
+ !node_isset(node, slab_nodes)))
+ node = NUMA_NO_NODE;
+- local_irq_save(flags);
+- if (unlikely(c->page)) {
+- local_irq_restore(flags);
+- goto reread_page;
+- }
+ goto new_slab;
+ }
+ redo:
+@@ -2751,6 +2746,7 @@ static void *___slab_alloc(struct kmem_c
+
+ if (!freelist) {
+ c->page = NULL;
++ local_irq_restore(flags);
+ stat(s, DEACTIVATE_BYPASS);
+ goto new_slab;
+ }
+@@ -2780,12 +2776,19 @@ static void *___slab_alloc(struct kmem_c
+ goto reread_page;
+ }
+ deactivate_slab(s, page, c->freelist, c);
++ local_irq_restore(flags);
+
+ new_slab:
+
+- lockdep_assert_irqs_disabled();
+-
+ if (slub_percpu_partial(c)) {
++ local_irq_save(flags);
++ if (unlikely(c->page)) {
++ local_irq_restore(flags);
++ goto reread_page;
++ }
++ if (unlikely(!slub_percpu_partial(c)))
++ goto new_objects; /* stolen by an IRQ handler */
++
+ page = c->page = slub_percpu_partial(c);
+ slub_set_percpu_partial(c, page);
+ local_irq_restore(flags);
+@@ -2793,6 +2796,16 @@ static void *___slab_alloc(struct kmem_c
+ goto redo;
+ }
+
++ local_irq_save(flags);
++ if (unlikely(c->page)) {
++ local_irq_restore(flags);
++ goto reread_page;
++ }
++
++new_objects:
++
++ lockdep_assert_irqs_disabled();
++
+ freelist = get_partial(s, gfpflags, node, &page);
+ if (freelist) {
+ c->page = page;
+@@ -2825,15 +2838,18 @@ static void *___slab_alloc(struct kmem_c
+ check_new_page:
+
+ if (kmem_cache_debug(s)) {
+- if (!alloc_debug_processing(s, page, freelist, addr))
++ if (!alloc_debug_processing(s, page, freelist, addr)) {
+ /* Slab failed checks. Next slab needed */
++ c->page = NULL;
++ local_irq_restore(flags);
+ goto new_slab;
+- else
++ } else {
+ /*
+ * For debug case, we don't load freelist so that all
+ * allocations go through alloc_debug_processing()
+ */
+ goto return_single;
++ }
+ }
+
+ if (unlikely(!pfmemalloc_match(page, gfpflags)))
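
For readers skimming the hunks above, the control-flow pattern the patch introduces is: peek at the per-cpu partial list with irqs still enabled, then take local_irq_save() and recheck both c->page and the partial list before committing, since an interrupt-context allocation may have changed either in the meantime. Below is a minimal standalone sketch of that check/recheck pattern; the names (cpu_slab, take_percpu_partial, irq_save, irq_restore) are illustrative stand-ins, not the kernel's actual API, and the list handling is simplified.

#include <stddef.h>

/* Hypothetical stand-ins for local_irq_save()/local_irq_restore(). */
static unsigned long irq_save(void) { return 0; }
static void irq_restore(unsigned long flags) { (void)flags; }

struct cpu_slab { void *page; void *partial; };

/*
 * Peek at the per-cpu partial list with irqs enabled, then recheck
 * everything with irqs disabled before committing, mirroring the
 * flow the patch adds around the new_slab: label.
 */
void *take_percpu_partial(struct cpu_slab *c)
{
	unsigned long flags;

	if (!c->partial)		/* cheap, racy peek, irqs on */
		return NULL;

	flags = irq_save();
	if (c->page) {			/* an IRQ installed a page meanwhile */
		irq_restore(flags);
		return NULL;		/* caller re-reads c->page */
	}
	if (!c->partial) {		/* partial list stolen by an IRQ */
		irq_restore(flags);
		return NULL;		/* caller falls back to get_partial() */
	}
	c->page = c->partial;		/* commit while irqs are off */
	c->partial = NULL;
	irq_restore(flags);
	return c->page;
}

The same idea shows up twice in the real hunks: once for slub_percpu_partial(c) and once just before get_partial(), which is why lockdep_assert_irqs_disabled() moves down to the new_objects: label.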