Diffstat (limited to 'patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch')
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch  125
1 file changed, 125 insertions(+), 0 deletions(-)
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
new file mode 100644
index 000000000000..cfce7cb0de64
--- /dev/null
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -0,0 +1,125 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 13 Jan 2016 15:55:02 +0100
+Subject: net: move xmit_recursion to per-task variable on -RT
+
+A softirq on -RT can be preempted. That means one task is in
+__dev_queue_xmit(), gets preempted and another task may enter
+__dev_queue_xmit() as well. netperf together with a bridge device
+will then trigger the `recursion alert` because each task increments
+the xmit_recursion variable which is per-CPU.
+A virtual device like br0 is required to trigger this warning.
+
+This patch moves the counter to a per-task variable instead of a
+per-CPU one so it counts the recursion properly on -RT.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h | 9 +++++++++
+ include/linux/sched.h | 3 +++
+ net/core/dev.c | 41 ++++++++++++++++++++++++++++++++++++++---
+ 3 files changed, 50 insertions(+), 3 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2249,11 +2249,20 @@ void netdev_freemem(struct net_device *d
+ void synchronize_net(void);
+ int init_dummy_netdev(struct net_device *dev);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int dev_recursion_level(void)
++{
++ return current->xmit_recursion;
++}
++
++#else
++
+ DECLARE_PER_CPU(int, xmit_recursion);
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(xmit_recursion);
+ }
++#endif
+
+ struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1851,6 +1851,9 @@ struct task_struct {
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int xmit_recursion;
++#endif
+ int pagefault_disabled;
+ /* CPU-specific state of this task */
+ struct thread_struct thread;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2940,9 +2940,44 @@ static void skb_update_prio(struct sk_bu
+ #define skb_update_prio(skb)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++static inline int xmit_rec_read(void)
++{
++ return current->xmit_recursion;
++}
++
++static inline void xmit_rec_inc(void)
++{
++ current->xmit_recursion++;
++}
++
++static inline void xmit_rec_dec(void)
++{
++ current->xmit_recursion--;
++}
++
++#else
++
+ DEFINE_PER_CPU(int, xmit_recursion);
+ EXPORT_SYMBOL(xmit_recursion);
+
++static inline int xmit_rec_read(void)
++{
++ return __this_cpu_read(xmit_recursion);
++}
++
++static inline void xmit_rec_inc(void)
++{
++ __this_cpu_inc(xmit_recursion);
++}
++
++static inline void xmit_rec_dec(void)
++{
++ __this_cpu_dec(xmit_recursion);
++}
++#endif
++
+ #define RECURSION_LIMIT 10
+
+ /**
+@@ -3135,7 +3170,7 @@ static int __dev_queue_xmit(struct sk_bu
+
+ if (txq->xmit_lock_owner != cpu) {
+
+- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
++ if (xmit_rec_read() > RECURSION_LIMIT)
+ goto recursion_alert;
+
+ skb = validate_xmit_skb(skb, dev);
+@@ -3145,9 +3180,9 @@ static int __dev_queue_xmit(struct sk_bu
+ HARD_TX_LOCK(dev, txq, cpu);
+
+ if (!netif_xmit_stopped(txq)) {
+- __this_cpu_inc(xmit_recursion);
++ xmit_rec_inc();
+ skb = dev_hard_start_xmit(skb, dev, txq, &rc);
+- __this_cpu_dec(xmit_recursion);
++ xmit_rec_dec();
+ if (dev_xmit_complete(rc)) {
+ HARD_TX_UNLOCK(dev, txq);
+ goto out;
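
For illustration only, and not part of the patch above: a minimal, self-contained C sketch of the recursion guard that the net/core/dev.c hunks implement. A __thread counter stands in for current->xmit_recursion, and the hypothetical queue_xmit() stands in for __dev_queue_xmit(); only RECURSION_LIMIT and the read/inc/dec pattern are taken from the patch, everything else is illustrative.

#include <stdio.h>

#define RECURSION_LIMIT 10

/* One counter per thread (stand-in for the per-task field the patch adds),
 * so a preempting task cannot inflate another task's count. */
static __thread int xmit_recursion;

static int  xmit_rec_read(void) { return xmit_recursion; }
static void xmit_rec_inc(void)  { xmit_recursion++; }
static void xmit_rec_dec(void)  { xmit_recursion--; }

/* Hypothetical transmit path that may re-enter itself, e.g. through a
 * stack of virtual devices such as a bridge on top of a physical NIC. */
static int queue_xmit(int nested_devices)
{
	int rc = 0;

	if (xmit_rec_read() > RECURSION_LIMIT) {
		fprintf(stderr, "recursion alert at depth %d\n", xmit_rec_read());
		return -1;	/* drop rather than recurse without bound */
	}

	xmit_rec_inc();
	if (nested_devices > 0)
		rc = queue_xmit(nested_devices - 1);
	xmit_rec_dec();

	return rc;
}

int main(void)
{
	printf("shallow device stack: %d\n", queue_xmit(3));	/* stays below the limit */
	printf("deep device stack:    %d\n", queue_xmit(50));	/* trips the limit */
	return 0;
}

With a per-CPU counter instead (the pre-patch __this_cpu_* variant shown in the hunks), two tasks preempting each other on the same CPU would share one counter, so the limit could be exceeded without any real recursion; that is the false `recursion alert` the patch avoids on -RT.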