Diffstat (limited to 'patches/0017-mm-slub-stop-disabling-irqs-around-get_partial.patch')
-rw-r--r--  patches/0017-mm-slub-stop-disabling-irqs-around-get_partial.patch  86
1 file changed, 86 insertions, 0 deletions
diff --git a/patches/0017-mm-slub-stop-disabling-irqs-around-get_partial.patch b/patches/0017-mm-slub-stop-disabling-irqs-around-get_partial.patch
new file mode 100644
index 000000000000..8a837152186c
--- /dev/null
+++ b/patches/0017-mm-slub-stop-disabling-irqs-around-get_partial.patch
@@ -0,0 +1,86 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Tue, 11 May 2021 17:45:26 +0200
+Subject: [PATCH 17/33] mm, slub: stop disabling irqs around get_partial()
+
+The function get_partial() does not need to have irqs disabled as a whole. It's
+sufficient to convert spin_lock operations to their irq saving/restoring
+versions.
+
+As a result, it's now possible to reach the page allocator from the slab
+allocator without disabling and re-enabling interrupts on the way.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/slub.c | 22 ++++++++--------------
+ 1 file changed, 8 insertions(+), 14 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2010,11 +2010,12 @@ static inline bool pfmemalloc_match(stru
+ * Try to allocate a partial slab from a specific node.
+ */
+ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+- struct page **ret_page, gfp_t flags)
++ struct page **ret_page, gfp_t gfpflags)
+ {
+ struct page *page, *page2;
+ void *object = NULL;
+ unsigned int available = 0;
++ unsigned long flags;
+ int objects;
+
+ /*
+@@ -2026,11 +2027,11 @@ static void *get_partial_node(struct kme
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
+ void *t;
+
+- if (!pfmemalloc_match(page, flags))
++ if (!pfmemalloc_match(page, gfpflags))
+ continue;
+
+ t = acquire_slab(s, n, page, object == NULL, &objects);
+@@ -2051,7 +2052,7 @@ static void *get_partial_node(struct kme
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ spin_unlock_irqrestore(&n->list_lock, flags);
+ return object;
+ }
+
+@@ -2779,8 +2780,10 @@ static void *___slab_alloc(struct kmem_c
+ local_irq_restore(flags);
+ goto reread_page;
+ }
+- if (unlikely(!slub_percpu_partial(c)))
++ if (unlikely(!slub_percpu_partial(c))) {
++ local_irq_restore(flags);
+ goto new_objects; /* stolen by an IRQ handler */
++ }
+
+ page = c->page = slub_percpu_partial(c);
+ slub_set_percpu_partial(c, page);
+@@ -2789,18 +2792,9 @@ static void *___slab_alloc(struct kmem_c
+ goto redo;
+ }
+
+- local_irq_save(flags);
+- if (unlikely(c->page)) {
+- local_irq_restore(flags);
+- goto reread_page;
+- }
+-
+ new_objects:
+
+- lockdep_assert_irqs_disabled();
+-
+ freelist = get_partial(s, gfpflags, node, &page);
+- local_irq_restore(flags);
+ if (freelist)
+ goto check_new_page;
+
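
A minimal sketch (not part of the patch) of the locking conversion described in the changelog above: the plain spin_lock() on n->list_lock relied on the caller having already disabled interrupts, while spin_lock_irqsave()/spin_unlock_irqrestore() disable interrupts and restore the previous state around the critical section itself. The struct and function names below are made up for illustration; only the spinlock and list primitives are the real kernel API.

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Hypothetical stand-in for kmem_cache_node. */
    struct example_node {
            spinlock_t list_lock;
            struct list_head partial;
    };

    /* Before: correct only if the caller has already disabled interrupts. */
    static void example_scan_irqs_off(struct example_node *n)
    {
            spin_lock(&n->list_lock);
            /* ... walk n->partial ... */
            spin_unlock(&n->list_lock);
    }

    /* After: interrupts are disabled only for the locked section itself,
     * and the previous interrupt state is restored on unlock. */
    static void example_scan(struct example_node *n)
    {
            unsigned long flags;

            spin_lock_irqsave(&n->list_lock, flags);
            /* ... walk n->partial ... */
            spin_unlock_irqrestore(&n->list_lock, flags);
    }

Because get_partial_node() now uses the irqsave variant, it no longer depends on the caller running with interrupts off, which is why the last hunk can drop the local_irq_save()/local_irq_restore() pair and the lockdep_assert_irqs_disabled() around the get_partial() call in ___slab_alloc().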