Diffstat (limited to 'patches/net-another-local-irq-disable-alloc-atomic-headache.patch')
-rw-r--r--  patches/net-another-local-irq-disable-alloc-atomic-headache.patch  20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index 19a6826d8af0..6180444570e5 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -6,8 +6,8 @@ Replace it by a local lock. Though that's pretty inefficient :(
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- net/core/skbuff.c | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
+ net/core/skbuff.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -40,3 +40,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return data;
}
+@@ -427,13 +429,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
+- local_irq_save(flags);
++ local_lock_irqsave(netdev_alloc_lock, flags);
+
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, len, gfp_mask);
+ pfmemalloc = nc->pfmemalloc;
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(netdev_alloc_lock, flags);
+
+ if (unlikely(!data))
+ return NULL;
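
For context, the hunk above swaps the raw IRQ disable in __netdev_alloc_skb() for the RT tree's local lock: on !PREEMPT_RT local_lock_irqsave() still compiles down to local_irq_save(), while on PREEMPT_RT the per-CPU netdev_alloc_cache is serialized by a per-CPU lock so the section stays preemptible. Below is a minimal sketch of that pattern, assuming the old RT locallock API (<linux/locallock.h>, DEFINE_LOCAL_IRQ_LOCK); the names example_alloc_lock, example_cache and example_alloc are hypothetical and only illustrate the shape of the change, they are not part of the patch (the real lock, netdev_alloc_lock, is defined elsewhere in the patch file).

#include <linux/locallock.h>	/* RT tree only; provides DEFINE_LOCAL_IRQ_LOCK */
#include <linux/percpu.h>
#include <linux/gfp.h>

struct example_cache {
	bool pfmemalloc;
	/* ... per-CPU page-fragment state ... */
};

static DEFINE_PER_CPU(struct example_cache, example_cache);
/* Hypothetical lock; the real patch adds netdev_alloc_lock instead. */
static DEFINE_LOCAL_IRQ_LOCK(example_alloc_lock);

static void *example_alloc(unsigned int len, gfp_t gfp_mask)
{
	struct example_cache *nc;
	unsigned long flags;
	void *data = NULL;

	/*
	 * On !PREEMPT_RT this is local_irq_save(); on PREEMPT_RT it takes
	 * a per-CPU lock instead, keeping the section preemptible while
	 * still serializing access to the per-CPU cache.
	 */
	local_lock_irqsave(example_alloc_lock, flags);
	nc = this_cpu_ptr(&example_cache);
	/* ... allocate 'len' bytes from nc using gfp_mask ... */
	local_unlock_irqrestore(example_alloc_lock, flags);

	return data;
}

As the changelog line in the hunk header notes ("Though that's pretty inefficient"), on RT this trades a cheap IRQ disable for a per-CPU lock acquisition; that cost is the price of keeping the allocation path preemptible.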