author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2018-02-22 15:21:01 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2018-02-22 15:21:01 +0100
commit     3248c316fa2efe20a1d641d39cae8cbb6768c18d (patch)
tree       88bff023ce56f79fa9dbe5c84eefc8386882b9d0
parent     92095976d29ae201ab583ebffaa57e5ff5b15fd4 (diff)
download   linux-rt-4.14.20-rt17-patches.tar.gz
[ANNOUNCE] v4.14.20-rt17 (tag: v4.14.20-rt17-patches)
Dear RT folks!
I'm pleased to announce the v4.14.20-rt17 patch set.
Changes since v4.14.20-rt16:
- An RCU warning that triggers if we schedule() while acquiring a
  sleeping lock used to be disabled on SMP kernels only. The warning was
  still seen on UP-only kernels and is now disabled there as well (see
  the first sketch after this list). Reported by Grygorii Strashko.
- On RT, the recording of the xmit recursion level in networking was
  changed from a per-CPU to a per-task variable. This was done because
  BH context can be preempted on RT, so multiple tasks may attempt to
  send a packet on the same CPU and would wrongly increase a per-CPU
  counter. However, the queue lock owner was still recorded as the CPU
  holding the lock instead of the task, which led to a false recursion
  warning once the same transmit queue was already used (locked) by
  another task. The owner is now recorded per-task as well (see the
  second sketch after this list). Reported by Kurt Kanzenbach.
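
For reference, the core of the tiny UP-only migrate_disable()/_enable()
pair from the first item looks like this (a simplified sketch of the
appended patch, with the CONFIG_SCHED_DEBUG accounting left out):

    /* !SMP && PREEMPT_RT_BASE: only maintain the per-task counter
     * which the RCU warning checks against; there is no migration
     * to actually prevent on UP. */
    void migrate_disable(void)
    {
            struct task_struct *p = current;

            if (in_atomic() || irqs_disabled())
                    return;
            p->migrate_disable++;
    }

    void migrate_enable(void)
    {
            struct task_struct *p = current;

            if (in_atomic() || irqs_disabled())
                    return;
            WARN_ON_ONCE(p->migrate_disable <= 0);
            p->migrate_disable--;
    }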
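
The second item boils down to storing current instead of the CPU number
as the transmit queue lock owner, so the recursion check compares tasks
rather than CPUs. Excerpted (and lightly annotated) from the appended
patch:

    #ifdef CONFIG_PREEMPT_RT_FULL
    /* On RT the owner is the task holding the queue lock. */
    static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
    {
            txq->xmit_lock_owner = current;
    }

    static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
    {
            txq->xmit_lock_owner = NULL;
    }
    #else
    /* Everywhere else it remains the CPU number. */
    static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
    {
            txq->xmit_lock_owner = cpu;
    }

    static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
    {
            txq->xmit_lock_owner = -1;
    }
    #endif
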
Known issues
- A warning triggered in "rcu_note_context_switch", originating from
  SyS_timer_gettime(). The issue was always there; it is merely visible
  now. Reported by Grygorii Strashko and Daniel Wagner.
The delta patch against v4.14.20-rt16 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/incr/patch-4.14.20-rt16-rt17.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.14.20-rt17
The RT patch against v4.14.20 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patch-4.14.20-rt17.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.20-rt17.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
19 files changed, 298 insertions, 31 deletions
diff --git a/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
new file mode 100644
index 000000000000..86486d57504e
--- /dev/null
+++ b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
@@ -0,0 +1,120 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 16 Feb 2018 11:45:13 +0100
+Subject: [PATCH] RCU: skip the "schedule() in RCU section" warning on UP,
+ too
+
+In "RCU: we need to skip that warning but only on sleeping locks" we
+skipped a warning on SMP systems in case we schedule out in a RCU
+section while attempt to obtain a sleeping lock. This is also required
+on UP systems.
+In order to do so, I introduce a tiny version of migrate_disable() +
+_enable() which only update the counters which we then can check against
+on RT && !SMP.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/preempt.h  |    9 +++++++++
+ include/linux/sched.h    |    6 ++++++
+ kernel/rcu/tree_plugin.h |    2 +-
+ kernel/sched/core.c      |   45 +++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 61 insertions(+), 1 deletion(-)
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -211,6 +211,15 @@ extern void migrate_enable(void);
+
+ int __migrate_disabled(struct task_struct *p);
+
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++static inline int __migrate_disabled(struct task_struct *p)
++{
++	return 0;
++}
++
+ #else
+ #define migrate_disable()	barrier()
+ #define migrate_enable()	barrier()
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -591,6 +591,12 @@ struct task_struct {
+ # ifdef CONFIG_SCHED_DEBUG
+ 	int migrate_disable_atomic;
+ # endif
++
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++	int migrate_disable;
++# ifdef CONFIG_SCHED_DEBUG
++	int migrate_disable_atomic;
++# endif
+ #endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -326,7 +326,7 @@ static void rcu_preempt_note_context_swi
+ 	int mg_counter = 0;
+
+ 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
+-#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++#if defined(CONFIG_PREEMPT_RT_BASE)
+ 	mg_counter = t->migrate_disable;
+ #endif
+ 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !mg_counter);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7019,4 +7019,49 @@ void migrate_enable(void)
+ 	preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
++
++#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++
++	if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic++;
++#endif
++		return;
++	}
++#ifdef CONFIG_SCHED_DEBUG
++	if (unlikely(p->migrate_disable_atomic)) {
++		tracing_off();
++		WARN_ON_ONCE(1);
++	}
++#endif
++
++	p->migrate_disable++;
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic--;
++#endif
++		return;
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	if (unlikely(p->migrate_disable_atomic)) {
++		tracing_off();
++		WARN_ON_ONCE(1);
++	}
++#endif
++
++	WARN_ON_ONCE(p->migrate_disable <= 0);
++	p->migrate_disable--;
++}
++EXPORT_SYMBOL(migrate_enable);
+ #endif
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 8661fc7be51f..15d442b6e98a 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -67,7 +67,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
 				 _TIF_NOHZ)
 --- a/arch/arm64/kernel/asm-offsets.c
 +++ b/arch/arm64/kernel/asm-offsets.c
-@@ -38,6 +38,7 @@ int main(void)
+@@ -39,6 +39,7 @@ int main(void)
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
@@ -77,7 +77,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));

 --- a/arch/arm64/kernel/entry.S
 +++ b/arch/arm64/kernel/entry.S
-@@ -570,11 +570,16 @@ ENDPROC(el1_sync)
+@@ -607,11 +607,16 @@ ENDPROC(el1_sync)
 #ifdef CONFIG_PREEMPT
 	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
@@ -97,7 +97,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
-@@ -588,6 +593,7 @@ ENDPROC(el1_irq)
+@@ -625,6 +630,7 @@ ENDPROC(el1_irq)
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
diff --git a/patches/arm-disable-NEON-in-kernel-mode.patch b/patches/arm-disable-NEON-in-kernel-mode.patch
index 869e1ce1ee2e..3edb741646c9 100644
--- a/patches/arm-disable-NEON-in-kernel-mode.patch
+++ b/patches/arm-disable-NEON-in-kernel-mode.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	select CRYPTO_AES_ARM64
 --- a/arch/arm64/crypto/crc32-ce-glue.c
 +++ b/arch/arm64/crypto/crc32-ce-glue.c
-@@ -206,7 +206,8 @@ static struct shash_alg crc32_pmull_algs
+@@ -208,7 +208,8 @@ static struct shash_alg crc32_pmull_algs
 static int __init crc32_pmull_mod_init(void)
 {
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index 85237d71532a..9e292b7de479 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
-@@ -774,7 +774,7 @@ config XEN_DOM0
+@@ -777,7 +777,7 @@ config XEN_DOM0
 config XEN
 	bool "Xen guest support on ARM64"
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index 50602b10dab5..b30d752f7dc8 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -785,7 +785,7 @@ int blk_queue_enter(struct request_queue
+@@ -794,7 +794,7 @@ int blk_queue_enter(struct request_queue
 		 */
 		smp_rmb();

 				!atomic_read(&q->mq_freeze_depth) ||
 				blk_queue_dying(q));
 		if (blk_queue_dying(q))
-@@ -805,7 +805,7 @@ static void blk_queue_usage_counter_rele
+@@ -814,7 +814,7 @@ static void blk_queue_usage_counter_rele
 	struct request_queue *q = container_of(ref, struct request_queue, q_usage_counter);

@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }

 static void blk_rq_timed_out_timer(unsigned long data)
-@@ -881,7 +881,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -890,7 +890,7 @@ struct request_queue *blk_alloc_queue_no
 	q->bypass_depth = 1;
 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index bc6b2a544b08..6b275df1b937 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -3291,7 +3291,7 @@ static void queue_unplugged(struct reque
+@@ -3300,7 +3300,7 @@ static void queue_unplugged(struct reque
 		blk_run_queue_async(q);
 	else
 		__blk_run_queue(q);
 }

 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3339,7 +3339,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3348,7 +3348,6 @@ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
-@@ -3359,11 +3358,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3368,11 +3367,6 @@ void blk_flush_plug_list(struct blk_plug
 	q = NULL;
 	depth = 0;

 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
-@@ -3376,7 +3370,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3385,7 +3379,7 @@ void blk_flush_plug_list(struct blk_plug
 			queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
 		}

 	/*
-@@ -3403,8 +3397,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3412,8 +3406,6 @@ void blk_flush_plug_list(struct blk_plug
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index ab16d1de47e2..1bbc3879586e 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/topology.c
 +++ b/kernel/sched/topology.c
-@@ -273,6 +273,7 @@ static int init_rootdomain(struct root_d
+@@ -286,6 +286,7 @@ static int init_rootdomain(struct root_d
 	rd->rto_cpu = -1;
 	raw_spin_lock_init(&rd->rto_lock);
 	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 0cccc7790a5d..3dc62b40b5be 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt16
++-rt17
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 9cddf870e0b2..da36000c66b5 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }

 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -5993,6 +5996,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6007,6 +6010,7 @@ void mem_cgroup_swapout(struct page *pag
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;

 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6038,13 +6042,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6052,13 +6056,17 @@ void mem_cgroup_swapout(struct page *pag
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for udpating the per-CPU variables.
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 064e4f9d444c..a4c4786a4a20 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
-@@ -1104,6 +1105,12 @@ struct task_struct {
+@@ -1110,6 +1111,12 @@ struct task_struct {
 	int softirq_nestcnt;
 	unsigned int softirqs_raised;
 #endif
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 3df60de33c87..a089e5155c8a 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 {
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -5249,6 +5249,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -5253,6 +5253,7 @@ bool napi_schedule_prep(struct napi_stru
 }
 EXPORT_SYMBOL(napi_schedule_prep);

 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
-@@ -5260,6 +5261,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -5264,6 +5265,7 @@ void __napi_schedule_irqoff(struct napi_
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 53857925ec8d..2129e041a291 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1107,6 +1107,9 @@ struct task_struct {
+@@ -1113,6 +1113,9 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long task_state_change;
 #endif
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index ed9035be543b..9a9ff10423c4 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/net/core/sock.c
 +++ b/net/core/sock.c
-@@ -2762,12 +2762,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2759,12 +2759,11 @@ void lock_sock_nested(struct sock *sk, i
 	if (sk->sk_lock.owned)
 		__lock_sock(sk);
 	sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index e53a9c5a6d85..c9050d0bd85a 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 void raise_softirq_irqoff(unsigned int nr)
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -5654,7 +5654,7 @@ static __latent_entropy void net_rx_acti
+@@ -5658,7 +5658,7 @@ static __latent_entropy void net_rx_acti
 	list_splice_tail(&repoll, &list);
 	list_splice(&list, &sd->poll_list);
 	if (!list_empty(&sd->poll_list))
diff --git a/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch b/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch
new file mode 100644
index 000000000000..9a6e66df6637
--- /dev/null
+++ b/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch
@@ -0,0 +1,145 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 21 Feb 2018 10:39:54 +0100
+Subject: [PATCH] net: use task_struct instead of CPU number as the queue
+ owner on -RT
+
+In commit ("net: move xmit_recursion to per-task variable on -RT") the
+recursion level was changed to be per-task since we can get preempted in
+BH on -RT. The lock owner should consequently be recorded as the task
+that holds the lock and not the CPU. Otherwise we trigger the "Dead loop
+on virtual device" warning on SMP systems.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
+Tested-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/netdevice.h |   54 ++++++++++++++++++++++++++++++++++++++++------
+ net/core/dev.c            |    6 ++++-
+ 2 files changed, 53 insertions(+), 7 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -571,7 +571,11 @@ struct netdev_queue {
+ 	 * write-mostly part
+ 	 */
+ 	spinlock_t	_xmit_lock ____cacheline_aligned_in_smp;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct task_struct	*xmit_lock_owner;
++#else
+ 	int		xmit_lock_owner;
++#endif
+ 	/*
+ 	 * Time (in jiffies) of last Tx
+ 	 */
+@@ -3535,10 +3539,48 @@ static inline u32 netif_msg_init(int deb
+ 	return (1 << debug_value) - 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
++{
++	txq->xmit_lock_owner = current;
++}
++
++static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
++{
++	txq->xmit_lock_owner = NULL;
++}
++
++static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
++{
++	if (txq->xmit_lock_owner != NULL)
++		return true;
++	return false;
++}
++
++#else
++
++static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
++{
++	txq->xmit_lock_owner = cpu;
++}
++
++static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
++{
++	txq->xmit_lock_owner = -1;
++}
++
++static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
++{
++	if (txq->xmit_lock_owner != -1)
++		return true;
++	return false;
++}
++#endif
++
+ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+ {
+ 	spin_lock(&txq->_xmit_lock);
+-	txq->xmit_lock_owner = cpu;
++	netdev_queue_set_owner(txq, cpu);
+ }
+
+ static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+@@ -3555,32 +3597,32 @@ static inline void __netif_tx_release(st
+ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+ {
+ 	spin_lock_bh(&txq->_xmit_lock);
+-	txq->xmit_lock_owner = smp_processor_id();
++	netdev_queue_set_owner(txq, smp_processor_id());
+ }
+
+ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+ {
+ 	bool ok = spin_trylock(&txq->_xmit_lock);
+ 	if (likely(ok))
+-		txq->xmit_lock_owner = smp_processor_id();
++		netdev_queue_set_owner(txq, smp_processor_id());
+ 	return ok;
+ }
+
+ static inline void __netif_tx_unlock(struct netdev_queue *txq)
+ {
+-	txq->xmit_lock_owner = -1;
++	netdev_queue_clear_owner(txq);
+ 	spin_unlock(&txq->_xmit_lock);
+ }
+
+ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+ {
+-	txq->xmit_lock_owner = -1;
++	netdev_queue_clear_owner(txq);
+ 	spin_unlock_bh(&txq->_xmit_lock);
+ }
+
+ static inline void txq_trans_update(struct netdev_queue *txq)
+ {
+-	if (txq->xmit_lock_owner != -1)
++	if (netdev_queue_has_owner(txq))
+ 		txq->trans_start = jiffies;
+ }
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3476,7 +3476,11 @@ static int __dev_queue_xmit(struct sk_bu
+ 	if (dev->flags & IFF_UP) {
+ 		int cpu = smp_processor_id(); /* ok because BHs are off */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++		if (txq->xmit_lock_owner != current) {
++#else
+ 		if (txq->xmit_lock_owner != cpu) {
++#endif
+ 			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
+ 				goto recursion_alert;
+
+@@ -7481,7 +7485,7 @@ static void netdev_init_one_queue(struct
+ 	/* Initialize queue lock */
+ 	spin_lock_init(&queue->_xmit_lock);
+ 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+-	queue->xmit_lock_owner = -1;
++	netdev_queue_clear_owner(queue);
+ 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
+ 	queue->dev = dev;
+ #ifdef CONFIG_BQL
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 301bf59a4d93..da481ac61f82 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
-@@ -241,6 +261,13 @@ do { \
+@@ -250,6 +270,13 @@ do { \
 		__preempt_schedule(); \
 } while (0)

@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 #else /* !CONFIG_PREEMPT */
 #define preempt_enable() \
 do { \
-@@ -248,6 +275,12 @@ do { \
+@@ -257,6 +284,12 @@ do { \
 	preempt_count_dec(); \
 } while (0)

@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 #define preempt_enable_notrace() \
 do { \
 	barrier(); \
-@@ -314,7 +347,7 @@ do { \
+@@ -323,7 +356,7 @@ do { \
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1600,6 +1600,44 @@ static inline int test_tsk_need_resched(
+@@ -1606,6 +1606,44 @@ static inline int test_tsk_need_resched(
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }

@@ -474,7 +474,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 /*
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1544,6 +1544,15 @@ extern void init_sched_fair_class(void);
+@@ -1546,6 +1546,15 @@ extern void init_sched_fair_class(void);
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 114b1ed2dd0f..5e5735f43be2 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 	return try_to_wake_up(p, state, 0);
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1352,6 +1352,7 @@ static inline void finish_lock_switch(st
+@@ -1354,6 +1354,7 @@ static inline void finish_lock_switch(st
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
diff --git a/patches/series b/patches/series
index 04f63ba2742e..c8f45c26da0c 100644
--- a/patches/series
+++ b/patches/series
@@ -436,6 +436,7 @@ locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
 locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
 ptrace-fix-ptrace-vs-tasklist_lock-race.patch
 RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch
+RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
 locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch

 # RCU
@@ -516,6 +517,7 @@ sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
 skbufhead-raw-lock.patch
 net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
 net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+net-use-task_struct-instead-of-CPU-number-as-the-que.patch
 net-provide-a-way-to-delegate-processing-a-softirq-t.patch
 net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
 net-Qdisc-use-a-seqlock-instead-seqcount.patch
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index b79d26545dd8..82c3af06c5ab 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4027,7 +4027,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4031,7 +4031,7 @@ static int netif_rx_internal(struct sk_b
 	struct rps_dev_flow voidflow, *rflow = &voidflow;
 	int cpu;

 	rcu_read_lock();

 	cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4037,14 +4037,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4041,14 +4041,14 @@ static int netif_rx_internal(struct sk_b
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 		rcu_read_unlock();