-rw-r--r--  patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch  |   2
-rw-r--r--  patches/0001-workqueue-Don-t-assume-that-the-callback-has-interru.patch  |  35
-rw-r--r--  patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch  |   2
-rw-r--r--  patches/0002-sched-swait-Add-swait_event_lock_irq.patch  |  33
-rw-r--r--  patches/0003-workqueue-Use-swait-for-wq_manager_wait.patch  |  53
-rw-r--r--  patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch  |   2
-rw-r--r--  patches/0012-bpf-Use-bpf_prog_run_pin_on_cpu-at-simple-call-sites.patch  |   2
-rw-r--r--  patches/0018-bpf-Replace-open-coded-recursion-prevention-in-sys_b.patch  |   4
-rw-r--r--  patches/ARM-Allow-to-enable-RT.patch  |   2
-rw-r--r--  patches/ARM64-Allow-to-enable-RT.patch  |   2
-rw-r--r--  patches/Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch  |  58
-rw-r--r--  patches/POWERPC-Allow-to-enable-RT.patch  |   2
-rw-r--r--  patches/arch-arm64-Add-lazy-preempt-support.patch  |   2
-rw-r--r--  patches/arm-preempt-lazy-support.patch  |   6
-rw-r--r--  patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch  |   4
-rw-r--r--  patches/jump-label-rt.patch  |   2
-rw-r--r--  patches/localversion.patch  |   2
-rw-r--r--  patches/power-disable-highmem-on-rt.patch  |   2
-rw-r--r--  patches/powerpc-preempt-lazy-support.patch  |   2
-rw-r--r--  patches/preempt-lazy-support.patch  |   6
-rw-r--r--  patches/series  |   9
-rw-r--r--  patches/skbufhead-raw-lock.patch  |  14
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch  |   6
-rw-r--r--  patches/workqueue-Convert-the-pool-lock-and-wq_mayday_lock-t.patch (renamed from patches/0004-workqueue-Convert-the-locks-to-raw-type.patch)  | 154
-rw-r--r--  patches/workqueue-Use-rcuwait-for-wq_manager_wait.patch  |  88
-rw-r--r--  patches/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch  |   2
-rw-r--r--  patches/x86-Enable-RT-also-on-32bit.patch  |   2
-rw-r--r--  patches/x86-preempt-lazy.patch  |   2
28 files changed, 272 insertions, 228 deletions
diff --git a/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch b/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
index c43a095290a1..2ba92001b69f 100644
--- a/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
+++ b/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
-@@ -8205,26 +8205,43 @@ static bool is_tracing_prog_type(enum bp
+@@ -8207,26 +8207,43 @@ static bool is_tracing_prog_type(enum bp
}
}
diff --git a/patches/0001-workqueue-Don-t-assume-that-the-callback-has-interru.patch b/patches/0001-workqueue-Don-t-assume-that-the-callback-has-interru.patch
deleted file mode 100644
index 4155fd8af798..000000000000
--- a/patches/0001-workqueue-Don-t-assume-that-the-callback-has-interru.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 11 Jun 2019 11:21:02 +0200
-Subject: [PATCH 1/4] workqueue: Don't assume that the callback has interrupts
- disabled
-
-Due to the TIMER_IRQSAFE flag, the timer callback is invoked with
-disabled interrupts. On -RT the callback is invoked in softirq context
-with enabled interrupts. Since the interrupts are threaded, there
-are no in_irq() users. The local_bh_disable() around the threaded
-handler ensures that there is either a timer or a threaded handler
-active on the CPU.
-
-Disable interrupts before __queue_work() is invoked from the timer
-callback.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/workqueue.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -1608,9 +1608,11 @@ EXPORT_SYMBOL_GPL(queue_work_node);
- void delayed_work_timer_fn(struct timer_list *t)
- {
- struct delayed_work *dwork = from_timer(dwork, t, timer);
-+ unsigned long flags;
-
-- /* should have been called from irqsafe timer with irq already off */
-+ local_irq_save(flags);
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
-+ local_irq_restore(flags);
- }
- EXPORT_SYMBOL(delayed_work_timer_fn);
-
diff --git a/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch b/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
index d788cd92faae..b1fbb59d5440 100644
--- a/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
+++ b/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
-@@ -8230,16 +8230,21 @@ static int check_map_prog_compatibility(
+@@ -8232,16 +8232,21 @@ static int check_map_prog_compatibility(
* of the memory allocator or at a place where a recursion into the
* memory allocator would see inconsistent state.
*
diff --git a/patches/0002-sched-swait-Add-swait_event_lock_irq.patch b/patches/0002-sched-swait-Add-swait_event_lock_irq.patch
deleted file mode 100644
index 9e24a802c289..000000000000
--- a/patches/0002-sched-swait-Add-swait_event_lock_irq.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 22 May 2019 12:42:26 +0200
-Subject: [PATCH 2/4] sched/swait: Add swait_event_lock_irq()
-
-The swait_event_lock_irq() macro is inspired by wait_event_lock_irq().
-This is required by the workqueue code once it switches to swait.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/swait.h | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
---- a/include/linux/swait.h
-+++ b/include/linux/swait.h
-@@ -297,4 +297,18 @@ do { \
- __ret; \
- })
-
-+#define __swait_event_lock_irq(wq, condition, lock, cmd) \
-+ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
-+ raw_spin_unlock_irq(&lock); \
-+ cmd; \
-+ schedule(); \
-+ raw_spin_lock_irq(&lock))
-+
-+#define swait_event_lock_irq(wq_head, condition, lock) \
-+ do { \
-+ if (condition) \
-+ break; \
-+ __swait_event_lock_irq(wq_head, condition, lock, ); \
-+ } while (0)
-+
- #endif /* _LINUX_SWAIT_H */
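
A minimal usage sketch of this (since-dropped) helper, mirroring the
workqueue call site removed in the next patch; the names mgr_wait,
mgr_lock and mgr_active below are illustrative, not from the patch. The
caller enters with the raw spinlock held and interrupts disabled; the
macro drops the lock around schedule() and re-takes it before each
re-check of the condition:

    static DECLARE_SWAIT_QUEUE_HEAD(mgr_wait);
    static DEFINE_RAW_SPINLOCK(mgr_lock);
    static bool mgr_active;

    static void become_manager(void)
    {
            raw_spin_lock_irq(&mgr_lock);
            /* sleeps with mgr_lock dropped; returns with it re-acquired */
            swait_event_lock_irq(mgr_wait, !mgr_active, mgr_lock);
            mgr_active = true;
            raw_spin_unlock_irq(&mgr_lock);
    }

    static void retire_manager(void)
    {
            raw_spin_lock_irq(&mgr_lock);
            mgr_active = false;
            raw_spin_unlock_irq(&mgr_lock);
            swake_up_one(&mgr_wait);
    }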
diff --git a/patches/0003-workqueue-Use-swait-for-wq_manager_wait.patch b/patches/0003-workqueue-Use-swait-for-wq_manager_wait.patch
deleted file mode 100644
index dcbe1eb6b903..000000000000
--- a/patches/0003-workqueue-Use-swait-for-wq_manager_wait.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 11 Jun 2019 11:21:09 +0200
-Subject: [PATCH 3/4] workqueue: Use swait for wq_manager_wait
-
-In order for the workqueue code to use raw_spinlock_t typed locking, no
-spinlock_t typed lock may be acquired. A wait_queue_head uses a
-spinlock_t lock for its list protection.
-
-Use a swait based queue head to avoid raw_spinlock_t -> spinlock_t
-locking.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/workqueue.c | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -50,6 +50,7 @@
- #include <linux/uaccess.h>
- #include <linux/sched/isolation.h>
- #include <linux/nmi.h>
-+#include <linux/swait.h>
-
- #include "workqueue_internal.h"
-
-@@ -301,7 +302,7 @@ static struct workqueue_attrs *wq_update
- static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
- static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
- static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
--static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
-+static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
-
- static LIST_HEAD(workqueues); /* PR: list of all workqueues */
- static bool workqueue_freezing; /* PL: have wqs started freezing? */
-@@ -2140,7 +2141,7 @@ static bool manage_workers(struct worker
-
- pool->manager = NULL;
- pool->flags &= ~POOL_MANAGER_ACTIVE;
-- wake_up(&wq_manager_wait);
-+ swake_up_one(&wq_manager_wait);
- return true;
- }
-
-@@ -3541,7 +3542,7 @@ static void put_unbound_pool(struct work
- * manager and @pool gets freed with the flag set.
- */
- spin_lock_irq(&pool->lock);
-- wait_event_lock_irq(wq_manager_wait,
-+ swait_event_lock_irq(wq_manager_wait,
- !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
- pool->flags |= POOL_MANAGER_ACTIVE;
-
diff --git a/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch b/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
index 8bd4f26cf5bb..49bae0c337c1 100644
--- a/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
+++ b/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
-@@ -1505,10 +1505,9 @@ void bpf_put_raw_tracepoint(struct bpf_r
+@@ -1507,10 +1507,9 @@ void bpf_put_raw_tracepoint(struct bpf_r
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
diff --git a/patches/0012-bpf-Use-bpf_prog_run_pin_on_cpu-at-simple-call-sites.patch b/patches/0012-bpf-Use-bpf_prog_run_pin_on_cpu-at-simple-call-sites.patch
index 91015ce77628..2de61b241237 100644
--- a/patches/0012-bpf-Use-bpf_prog_run_pin_on_cpu-at-simple-call-sites.patch
+++ b/patches/0012-bpf-Use-bpf_prog_run_pin_on_cpu-at-simple-call-sites.patch
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_SECCOMP_FILTER */
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
-@@ -920,9 +920,7 @@ bool bpf_flow_dissect(struct bpf_prog *p
+@@ -936,9 +936,7 @@ bool bpf_flow_dissect(struct bpf_prog *p
(int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
flow_keys->flags = flags;
diff --git a/patches/0018-bpf-Replace-open-coded-recursion-prevention-in-sys_b.patch b/patches/0018-bpf-Replace-open-coded-recursion-prevention-in-sys_b.patch
index 93dec79089ec..ba1b6e41b40b 100644
--- a/patches/0018-bpf-Replace-open-coded-recursion-prevention-in-sys_b.patch
+++ b/patches/0018-bpf-Replace-open-coded-recursion-prevention-in-sys_b.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
maybe_wait_bpf_programs(map);
return err;
-@@ -1136,13 +1129,11 @@ static int map_delete_elem(union bpf_att
+@@ -1147,13 +1140,11 @@ static int map_delete_elem(union bpf_att
goto out;
}
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
maybe_wait_bpf_programs(map);
out:
kfree(key);
-@@ -1254,13 +1245,11 @@ int generic_map_delete_batch(struct bpf_
+@@ -1265,13 +1256,11 @@ int generic_map_delete_batch(struct bpf_
break;
}
diff --git a/patches/ARM-Allow-to-enable-RT.patch b/patches/ARM-Allow-to-enable-RT.patch
index 9525ddb2e099..1721a11eb9c5 100644
--- a/patches/ARM-Allow-to-enable-RT.patch
+++ b/patches/ARM-Allow-to-enable-RT.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -31,6 +31,7 @@ config ARM
+@@ -32,6 +32,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
diff --git a/patches/ARM64-Allow-to-enable-RT.patch b/patches/ARM64-Allow-to-enable-RT.patch
index 0741e1aa44b5..6ad4552aa582 100644
--- a/patches/ARM64-Allow-to-enable-RT.patch
+++ b/patches/ARM64-Allow-to-enable-RT.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -68,6 +68,7 @@ config ARM64
+@@ -69,6 +69,7 @@ config ARM64
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_NUMA_BALANCING
diff --git a/patches/Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch b/patches/Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch
new file mode 100644
index 000000000000..054c935e4018
--- /dev/null
+++ b/patches/Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 May 2020 14:35:12 +0200
+Subject: [PATCH] Bluetooth: Acquire sk_lock.slock without disabling interrupts
+
+[ Upstream commit e6da0edc24eecef2f6964d92fa9044e1821deace ]
+
+There was a lockdep warning which led to commit
+ fad003b6c8e3d ("Bluetooth: Fix inconsistent lock state with RFCOMM")
+
+Lockdep noticed that `sk->sk_lock.slock' was acquired without disabling
+the softirq while the lock was also used in softirq context.
+Unfortunately the solution back then was to disable interrupts before
+acquiring the lock which however made lockdep happy.
+It would have been enough to simply disable the softirq. Disabling
+interrupts before acquiring a spinlock_t is not allowed on PREEMPT_RT
+because these locks are converted to 'sleeping' spinlocks.
+
+Use spin_lock_bh() in order to acquire the `sk_lock.slock'.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Reported-by: kbuild test robot <lkp@intel.com> [missing unlock]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/bluetooth/rfcomm/sock.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -64,15 +64,13 @@ static void rfcomm_sk_data_ready(struct
+ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
+ {
+ struct sock *sk = d->owner, *parent;
+- unsigned long flags;
+
+ if (!sk)
+ return;
+
+ BT_DBG("dlc %p state %ld err %d", d, d->state, err);
+
+- local_irq_save(flags);
+- bh_lock_sock(sk);
++ spin_lock_bh(&sk->sk_lock.slock);
+
+ if (err)
+ sk->sk_err = err;
+@@ -93,8 +91,7 @@ static void rfcomm_sk_state_change(struc
+ sk->sk_state_change(sk);
+ }
+
+- bh_unlock_sock(sk);
+- local_irq_restore(flags);
++ spin_unlock_bh(&sk->sk_lock.slock);
+
+ if (parent && sock_flag(sk, SOCK_ZAPPED)) {
+ /* We have to drop DLC lock here, otherwise
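
The pattern this patch applies generalizes: when a lock only races with
softirq context, disabling bottom halves around the spinlock_t is both
sufficient and PREEMPT_RT compatible. A short sketch with an
illustrative lock (obj_lock is a made-up name, not from the patch):

    static DEFINE_SPINLOCK(obj_lock);   /* spinlock_t: sleeps on PREEMPT_RT */

    static void update_from_task_context(void)
    {
            /*
             * Wrong on RT: local_irq_save() + spin_lock() would take a
             * sleeping lock with hard interrupts disabled.
             *
             * Right: the data only races with softirq, so disabling
             * bottom halves is enough, on both !RT and RT kernels.
             */
            spin_lock_bh(&obj_lock);
            /* modify state shared with the softirq path */
            spin_unlock_bh(&obj_lock);
    }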
diff --git a/patches/POWERPC-Allow-to-enable-RT.patch b/patches/POWERPC-Allow-to-enable-RT.patch
index abd55a299e56..318412076a51 100644
--- a/patches/POWERPC-Allow-to-enable-RT.patch
+++ b/patches/POWERPC-Allow-to-enable-RT.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -140,6 +140,7 @@ config PPC
+@@ -141,6 +141,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_SUPPORTS_ATOMIC_RMW
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index b7d4ac3e0c17..d895eb09be0b 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -21,7 +21,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -162,6 +162,7 @@ config ARM64
+@@ -163,6 +163,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index 00b475dc8621..80b587b323ab 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -102,6 +102,7 @@ config ARM
+@@ -103,6 +103,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -213,11 +213,18 @@ ENDPROC(__dabt_svc)
+@@ -206,11 +206,18 @@ ENDPROC(__dabt_svc)
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -232,8 +239,14 @@ ENDPROC(__irq_svc)
+@@ -225,8 +232,14 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
diff --git a/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
index c02d055d82f4..d1b82254551f 100644
--- a/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
+++ b/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
-@@ -160,8 +160,9 @@ static void cgroup_rstat_flush_locked(st
+@@ -150,8 +150,9 @@ static void cgroup_rstat_flush_locked(st
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
-@@ -173,7 +174,7 @@ static void cgroup_rstat_flush_locked(st
+@@ -163,7 +164,7 @@ static void cgroup_rstat_flush_locked(st
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index e610e3176ce2..7ca171519438 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -63,7 +63,7 @@ config ARM
+@@ -64,7 +64,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 68c7b973cc48..02952cda4bfa 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt8
++-rt9
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index a11adb227689..5acca8a73185 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -397,7 +397,7 @@ menu "Kernel options"
+@@ -398,7 +398,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index 1bce37d2d33d..d636ef5b80d4 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -219,6 +219,7 @@ config PPC
+@@ -220,6 +220,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 83f2586d7d2e..b4c663656cad 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6680,7 +6680,7 @@ static void check_preempt_wakeup(struct
+@@ -6686,7 +6686,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -434,7 +434,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10379,7 +10379,7 @@ static void task_fork_fair(struct task_s
+@@ -10385,7 +10385,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -443,7 +443,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10406,7 +10406,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10412,7 +10412,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
diff --git a/patches/series b/patches/series
index 8ebfdfe74522..8275eb0807c7 100644
--- a/patches/series
+++ b/patches/series
@@ -5,6 +5,9 @@
############################################################
# UPSTREAM merged
############################################################
+workqueue-Use-rcuwait-for-wq_manager_wait.patch
+workqueue-Convert-the-pool-lock-and-wq_mayday_lock-t.patch
+Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch
############################################################
# POSTED by others
@@ -125,12 +128,6 @@ Use-CONFIG_PREEMPTION.patch
sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch
-# WORKQUEUE, delay
-0001-workqueue-Don-t-assume-that-the-callback-has-interru.patch
-0002-sched-swait-Add-swait_event_lock_irq.patch
-0003-workqueue-Use-swait-for-wq_manager_wait.patch
-0004-workqueue-Convert-the-locks-to-raw-type.patch
-
############################################################
# Needs to address review feedback
############################################################
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 21e5c7eac378..97e6dd9b4bbd 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -5503,7 +5503,7 @@ static void flush_backlog(struct work_st
+@@ -5513,7 +5513,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -5513,11 +5513,14 @@ static void flush_backlog(struct work_st
+@@ -5523,11 +5523,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -6138,7 +6141,9 @@ static int process_backlog(struct napi_s
+@@ -6148,7 +6151,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -6146,9 +6151,9 @@ static int process_backlog(struct napi_s
+@@ -6156,9 +6161,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -6630,13 +6635,21 @@ static __latent_entropy void net_rx_acti
+@@ -6640,13 +6645,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -10188,10 +10201,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -10198,10 +10211,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -10504,8 +10520,9 @@ static int __init net_dev_init(void)
+@@ -10514,8 +10530,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 83fe32d12396..4fedb4125ce1 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -6100,12 +6103,14 @@ static void net_rps_action_and_irq_enabl
+@@ -6110,12 +6113,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -6183,6 +6188,7 @@ void __napi_schedule(struct napi_struct
+@@ -6193,6 +6198,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -10170,6 +10176,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10180,6 +10186,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch b/patches/workqueue-Convert-the-pool-lock-and-wq_mayday_lock-t.patch
index 469a20defab1..74422f48d76b 100644
--- a/patches/0004-workqueue-Convert-the-locks-to-raw-type.patch
+++ b/patches/workqueue-Convert-the-pool-lock-and-wq_mayday_lock-t.patch
@@ -1,19 +1,37 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 22 May 2019 12:43:56 +0200
-Subject: [PATCH 4/4] workqueue: Convert the locks to raw type
+Date: Wed, 27 May 2020 21:46:33 +0200
+Subject: [PATCH] workqueue: Convert the pool::lock and wq_mayday_lock to
+ raw_spinlock_t
-After all the workqueue and the timer rework, we can finally make the
-worker_pool lock raw.
-The lock is not held over an unbounded period of time/iterations.
+[ Upstream commit a9b8a985294debae00f6c087dfec8c384d30a3b9 ]
+
+The workqueue code has its internal spinlocks (pool::lock), which
+are acquired on most workqueue operations. These spinlocks are
+converted to 'sleeping' spinlocks on an RT kernel.
+
+Workqueue functions can be invoked from contexts which are truly atomic
+even on a PREEMPT_RT enabled kernel. Taking sleeping locks from such
+contexts is forbidden.
+
+The pool::lock hold times are bounded and the code sections are
+relatively short, which allows converting pool::lock and, as a
+consequence, wq_mayday_lock to raw spinlocks which remain truly
+spinning locks even on a PREEMPT_RT kernel.
+
+With the previous conversion of the manager waitqueue to rcuwait,
+workqueues are now fully RT compliant.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/workqueue.c | 174 ++++++++++++++++++++++++++---------------------------
- 1 file changed, 87 insertions(+), 87 deletions(-)
+ kernel/workqueue.c | 176 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 88 insertions(+), 88 deletions(-)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -146,7 +146,7 @@ enum {
+@@ -145,7 +145,7 @@ enum {
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
@@ -22,15 +40,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
-@@ -301,7 +301,7 @@ static struct workqueue_attrs *wq_update
+@@ -300,7 +300,7 @@ static struct workqueue_attrs *wq_update
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
- static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+ /* wait for manager to go away */
+ static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
- static LIST_HEAD(workqueues); /* PR: list of all workqueues */
@@ -827,7 +827,7 @@ static struct worker *first_idle_worker(
* Wake up the first idle worker of @pool.
*
@@ -198,7 +216,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_unlock();
}
-@@ -1760,7 +1760,7 @@ EXPORT_SYMBOL(queue_rcu_work);
+@@ -1758,7 +1758,7 @@ EXPORT_SYMBOL(queue_rcu_work);
* necessary.
*
* LOCKING:
@@ -207,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void worker_enter_idle(struct worker *worker)
{
-@@ -1800,7 +1800,7 @@ static void worker_enter_idle(struct wor
+@@ -1798,7 +1798,7 @@ static void worker_enter_idle(struct wor
* @worker is leaving idle state. Update stats.
*
* LOCKING:
@@ -216,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void worker_leave_idle(struct worker *worker)
{
-@@ -1938,11 +1938,11 @@ static struct worker *create_worker(stru
+@@ -1936,11 +1936,11 @@ static struct worker *create_worker(stru
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
@@ -230,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return worker;
-@@ -1961,7 +1961,7 @@ static struct worker *create_worker(stru
+@@ -1959,7 +1959,7 @@ static struct worker *create_worker(stru
* be idle.
*
* CONTEXT:
@@ -239,7 +257,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void destroy_worker(struct worker *worker)
{
-@@ -1987,7 +1987,7 @@ static void idle_worker_timeout(struct t
+@@ -1985,7 +1985,7 @@ static void idle_worker_timeout(struct t
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);
@@ -248,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (too_many_workers(pool)) {
struct worker *worker;
-@@ -2005,7 +2005,7 @@ static void idle_worker_timeout(struct t
+@@ -2003,7 +2003,7 @@ static void idle_worker_timeout(struct t
destroy_worker(worker);
}
@@ -257,7 +275,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void send_mayday(struct work_struct *work)
-@@ -2036,8 +2036,8 @@ static void pool_mayday_timeout(struct t
+@@ -2034,8 +2034,8 @@ static void pool_mayday_timeout(struct t
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;
@@ -268,7 +286,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (need_to_create_worker(pool)) {
/*
-@@ -2050,8 +2050,8 @@ static void pool_mayday_timeout(struct t
+@@ -2048,8 +2048,8 @@ static void pool_mayday_timeout(struct t
send_mayday(work);
}
@@ -279,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
-@@ -2070,7 +2070,7 @@ static void pool_mayday_timeout(struct t
+@@ -2068,7 +2068,7 @@ static void pool_mayday_timeout(struct t
* may_start_working() %true.
*
* LOCKING:
@@ -288,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
-@@ -2079,7 +2079,7 @@ static void maybe_create_worker(struct w
+@@ -2077,7 +2077,7 @@ static void maybe_create_worker(struct w
__acquires(&pool->lock)
{
restart:
@@ -297,7 +315,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
-@@ -2095,7 +2095,7 @@ static void maybe_create_worker(struct w
+@@ -2093,7 +2093,7 @@ static void maybe_create_worker(struct w
}
del_timer_sync(&pool->mayday_timer);
@@ -306,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
-@@ -2118,7 +2118,7 @@ static void maybe_create_worker(struct w
+@@ -2116,7 +2116,7 @@ static void maybe_create_worker(struct w
* and may_start_working() is true.
*
* CONTEXT:
@@ -315,7 +333,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
-@@ -2157,7 +2157,7 @@ static bool manage_workers(struct worker
+@@ -2155,7 +2155,7 @@ static bool manage_workers(struct worker
* call this function to process a work.
*
* CONTEXT:
@@ -324,7 +342,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
-@@ -2239,7 +2239,7 @@ static void process_one_work(struct work
+@@ -2237,7 +2237,7 @@ static void process_one_work(struct work
*/
set_work_pool_and_clear_pending(work, pool->id);
@@ -333,7 +351,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
-@@ -2294,7 +2294,7 @@ static void process_one_work(struct work
+@@ -2292,7 +2292,7 @@ static void process_one_work(struct work
*/
cond_resched();
@@ -342,7 +360,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
-@@ -2320,7 +2320,7 @@ static void process_one_work(struct work
+@@ -2318,7 +2318,7 @@ static void process_one_work(struct work
* fetches a work from the top and executes it.
*
* CONTEXT:
@@ -351,7 +369,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
-@@ -2362,11 +2362,11 @@ static int worker_thread(void *__worker)
+@@ -2360,11 +2360,11 @@ static int worker_thread(void *__worker)
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
@@ -365,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);
-@@ -2432,7 +2432,7 @@ static int worker_thread(void *__worker)
+@@ -2430,7 +2430,7 @@ static int worker_thread(void *__worker)
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
@@ -374,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
schedule();
goto woke_up;
}
-@@ -2486,7 +2486,7 @@ static int rescuer_thread(void *__rescue
+@@ -2484,7 +2484,7 @@ static int rescuer_thread(void *__rescue
should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
@@ -383,7 +401,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
-@@ -2498,11 +2498,11 @@ static int rescuer_thread(void *__rescue
+@@ -2496,11 +2496,11 @@ static int rescuer_thread(void *__rescue
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
@@ -397,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Slurp in all works issued via this workqueue and
-@@ -2531,7 +2531,7 @@ static int rescuer_thread(void *__rescue
+@@ -2529,7 +2529,7 @@ static int rescuer_thread(void *__rescue
* incur MAYDAY_INTERVAL delay inbetween.
*/
if (need_to_create_worker(pool)) {
@@ -406,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
-@@ -2540,7 +2540,7 @@ static int rescuer_thread(void *__rescue
+@@ -2538,7 +2538,7 @@ static int rescuer_thread(void *__rescue
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
@@ -415,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2558,14 +2558,14 @@ static int rescuer_thread(void *__rescue
+@@ -2556,14 +2556,14 @@ static int rescuer_thread(void *__rescue
if (need_more_worker(pool))
wake_up_worker(pool);
@@ -433,7 +451,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (should_stop) {
__set_current_state(TASK_RUNNING);
-@@ -2645,7 +2645,7 @@ static void wq_barrier_func(struct work_
+@@ -2643,7 +2643,7 @@ static void wq_barrier_func(struct work_
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
@@ -442,7 +460,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
-@@ -2732,7 +2732,7 @@ static bool flush_workqueue_prep_pwqs(st
+@@ -2730,7 +2730,7 @@ static bool flush_workqueue_prep_pwqs(st
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
@@ -451,7 +469,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
-@@ -2749,7 +2749,7 @@ static bool flush_workqueue_prep_pwqs(st
+@@ -2747,7 +2747,7 @@ static bool flush_workqueue_prep_pwqs(st
pwq->work_color = work_color;
}
@@ -460,7 +478,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
-@@ -2949,9 +2949,9 @@ void drain_workqueue(struct workqueue_st
+@@ -2947,9 +2947,9 @@ void drain_workqueue(struct workqueue_st
for_each_pwq(pwq, wq) {
bool drained;
@@ -472,7 +490,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (drained)
continue;
-@@ -2987,7 +2987,7 @@ static bool start_flush_work(struct work
+@@ -2985,7 +2985,7 @@ static bool start_flush_work(struct work
return false;
}
@@ -481,7 +499,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -3003,7 +3003,7 @@ static bool start_flush_work(struct work
+@@ -3001,7 +3001,7 @@ static bool start_flush_work(struct work
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
@@ -490,7 +508,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Force a lock recursion deadlock when using flush_work() inside a
-@@ -3022,7 +3022,7 @@ static bool start_flush_work(struct work
+@@ -3020,7 +3020,7 @@ static bool start_flush_work(struct work
rcu_read_unlock();
return true;
already_gone:
@@ -499,7 +517,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_unlock();
return false;
}
-@@ -3415,7 +3415,7 @@ static bool wqattrs_equal(const struct w
+@@ -3413,7 +3413,7 @@ static bool wqattrs_equal(const struct w
*/
static int init_worker_pool(struct worker_pool *pool)
{
@@ -508,16 +526,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
-@@ -3541,7 +3541,7 @@ static void put_unbound_pool(struct work
- * @pool's workers from blocking on attach_mutex. We're the last
- * manager and @pool gets freed with the flag set.
- */
+@@ -3506,10 +3506,10 @@ static void rcu_free_pool(struct rcu_hea
+ /* This returns with the lock held on success (pool manager is inactive). */
+ static bool wq_manager_inactive(struct worker_pool *pool)
+ {
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
- swait_event_lock_irq(wq_manager_wait,
- !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
- pool->flags |= POOL_MANAGER_ACTIVE;
-@@ -3549,7 +3549,7 @@ static void put_unbound_pool(struct work
+
+ if (pool->flags & POOL_MANAGER_ACTIVE) {
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ return false;
+ }
+ return true;
+@@ -3559,7 +3559,7 @@ static void put_unbound_pool(struct work
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -526,7 +548,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
-@@ -3705,7 +3705,7 @@ static void pwq_adjust_max_active(struct
+@@ -3715,7 +3715,7 @@ static void pwq_adjust_max_active(struct
return;
/* this function can be called during early boot w/ irq disabled */
@@ -535,7 +557,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* During [un]freezing, the caller is responsible for ensuring that
-@@ -3728,7 +3728,7 @@ static void pwq_adjust_max_active(struct
+@@ -3738,7 +3738,7 @@ static void pwq_adjust_max_active(struct
pwq->max_active = 0;
}
@@ -544,7 +566,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
-@@ -4130,9 +4130,9 @@ static void wq_update_unbound_numa(struc
+@@ -4140,9 +4140,9 @@ static void wq_update_unbound_numa(struc
use_dfl_pwq:
mutex_lock(&wq->mutex);
@@ -556,7 +578,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
-@@ -4361,9 +4361,9 @@ void destroy_workqueue(struct workqueue_
+@@ -4371,9 +4371,9 @@ void destroy_workqueue(struct workqueue_
struct worker *rescuer = wq->rescuer;
/* this prevents new queueing */
@@ -568,7 +590,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
-@@ -4377,18 +4377,18 @@ void destroy_workqueue(struct workqueue_
+@@ -4387,18 +4387,18 @@ void destroy_workqueue(struct workqueue_
mutex_lock(&wq_pool_mutex);
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
@@ -590,7 +612,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
mutex_unlock(&wq->mutex);
mutex_unlock(&wq_pool_mutex);
-@@ -4559,10 +4559,10 @@ unsigned int work_busy(struct work_struc
+@@ -4569,10 +4569,10 @@ unsigned int work_busy(struct work_struc
rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
@@ -603,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_unlock();
-@@ -4769,10 +4769,10 @@ void show_workqueue_state(void)
+@@ -4779,10 +4779,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
@@ -616,7 +638,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4786,7 +4786,7 @@ void show_workqueue_state(void)
+@@ -4796,7 +4796,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;
@@ -625,7 +647,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
-@@ -4805,7 +4805,7 @@ void show_workqueue_state(void)
+@@ -4815,7 +4815,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
@@ -634,7 +656,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
-@@ -4835,7 +4835,7 @@ void wq_worker_comm(char *buf, size_t si
+@@ -4845,7 +4845,7 @@ void wq_worker_comm(char *buf, size_t si
struct worker_pool *pool = worker->pool;
if (pool) {
@@ -643,7 +665,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
-@@ -4849,7 +4849,7 @@ void wq_worker_comm(char *buf, size_t si
+@@ -4859,7 +4859,7 @@ void wq_worker_comm(char *buf, size_t si
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
@@ -652,7 +674,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -4880,7 +4880,7 @@ static void unbind_workers(int cpu)
+@@ -4890,7 +4890,7 @@ static void unbind_workers(int cpu)
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
@@ -661,7 +683,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We've blocked all attach/detach operations. Make all workers
-@@ -4894,7 +4894,7 @@ static void unbind_workers(int cpu)
+@@ -4904,7 +4904,7 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
@@ -670,7 +692,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&wq_pool_attach_mutex);
/*
-@@ -4920,9 +4920,9 @@ static void unbind_workers(int cpu)
+@@ -4930,9 +4930,9 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
@@ -682,7 +704,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -4949,7 +4949,7 @@ static void rebind_workers(struct worker
+@@ -4959,7 +4959,7 @@ static void rebind_workers(struct worker
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
@@ -691,7 +713,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool->flags &= ~POOL_DISASSOCIATED;
-@@ -4988,7 +4988,7 @@ static void rebind_workers(struct worker
+@@ -4998,7 +4998,7 @@ static void rebind_workers(struct worker
WRITE_ONCE(worker->flags, worker_flags);
}
diff --git a/patches/workqueue-Use-rcuwait-for-wq_manager_wait.patch b/patches/workqueue-Use-rcuwait-for-wq_manager_wait.patch
new file mode 100644
index 000000000000..d4e4c1e4bd64
--- /dev/null
+++ b/patches/workqueue-Use-rcuwait-for-wq_manager_wait.patch
@@ -0,0 +1,88 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 27 May 2020 21:46:32 +0200
+Subject: [PATCH] workqueue: Use rcuwait for wq_manager_wait
+
+[ Upstream commit d8bb65ab70f702531aaaa11d9710f9450078e295 ]
+
+The workqueue code has its internal spinlock (pool::lock) and also
+implicit spinlock usage in the wq_manager waitqueue. These spinlocks
+are converted to 'sleeping' spinlocks on an RT kernel.
+
+Workqueue functions can be invoked from contexts which are truly atomic
+even on a PREEMPT_RT enabled kernel. Taking sleeping locks from such
+contexts is forbidden.
+
+pool::lock can be converted to a raw spinlock as the lock hold times
+are short. But the workqueue manager waitqueue is handled inside
+pool::lock held regions, which again violates the lock nesting rules
+of raw and regular spinlocks.
+
+The manager waitqueue has no special requirements like custom wakeup
+callbacks or mass wakeups. While it does not use exclusive wait mode
+explicitly there is no strict requirement to queue the waiters in a
+particular order as there is only one waiter at a time.
+
+This allows replacing the waitqueue with rcuwait, which solves the
+locking problem because rcuwait relies on existing locking.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/workqueue.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -301,7 +301,8 @@ static struct workqueue_attrs *wq_update
+ static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
+ static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
+ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
++/* wait for manager to go away */
++static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
+
+ static LIST_HEAD(workqueues); /* PR: list of all workqueues */
+ static bool workqueue_freezing; /* PL: have wqs started freezing? */
+@@ -2138,7 +2139,7 @@ static bool manage_workers(struct worker
+
+ pool->manager = NULL;
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+- wake_up(&wq_manager_wait);
++ rcuwait_wake_up(&manager_wait);
+ return true;
+ }
+
+@@ -3502,6 +3503,18 @@ static void rcu_free_pool(struct rcu_hea
+ kfree(pool);
+ }
+
++/* This returns with the lock held on success (pool manager is inactive). */
++static bool wq_manager_inactive(struct worker_pool *pool)
++{
++ spin_lock_irq(&pool->lock);
++
++ if (pool->flags & POOL_MANAGER_ACTIVE) {
++ spin_unlock_irq(&pool->lock);
++ return false;
++ }
++ return true;
++}
++
+ /**
+ * put_unbound_pool - put a worker_pool
+ * @pool: worker_pool to put
+@@ -3537,10 +3550,10 @@ static void put_unbound_pool(struct work
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
++ * Because of how wq_manager_inactive() works, we will hold the
++ * spinlock after a successful wait.
+ */
+- spin_lock_irq(&pool->lock);
+- wait_event_lock_irq(wq_manager_wait,
+- !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
++ rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool));
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
+ while ((worker = first_idle_worker(pool)))
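
The subtle part of this patch is the condition function:
rcuwait_wait_event() re-evaluates the condition with only an RCU read
lock held, so wq_manager_inactive() supplies its own locking and
returns with pool::lock held on success. A stripped-down sketch of the
same pattern, with illustrative names (owner_wait, obj_lock, obj_owned
are not from the patch):

    static struct rcuwait owner_wait = __RCUWAIT_INITIALIZER(owner_wait);
    static DEFINE_SPINLOCK(obj_lock);
    static bool obj_owned;

    /* Returns with obj_lock held on success, so no wakeup can slip in
     * between the check and the caller taking ownership. */
    static bool obj_unowned(void)
    {
            spin_lock_irq(&obj_lock);
            if (obj_owned) {
                    spin_unlock_irq(&obj_lock);
                    return false;
            }
            return true;
    }

    static void claim_object(void)
    {
            rcuwait_wait_event(&owner_wait, obj_unowned());
            obj_owned = true;               /* obj_lock is held here */
            spin_unlock_irq(&obj_lock);
    }

    static void release_object(void)
    {
            spin_lock_irq(&obj_lock);
            obj_owned = false;
            spin_unlock_irq(&obj_lock);
            rcuwait_wake_up(&owner_wait);
    }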
diff --git a/patches/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch b/patches/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
index fa2ef1797bcc..5917bd5b9a93 100644
--- a/patches/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
+++ b/patches/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -134,8 +134,8 @@ config X86
+@@ -135,8 +135,8 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
diff --git a/patches/x86-Enable-RT-also-on-32bit.patch b/patches/x86-Enable-RT-also-on-32bit.patch
index f9d174e32d59..e48fa03594c4 100644
--- a/patches/x86-Enable-RT-also-on-32bit.patch
+++ b/patches/x86-Enable-RT-also-on-32bit.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -90,6 +89,7 @@ config X86
+@@ -91,6 +90,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index b7ff4e4adc35..2617e73886a2 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -204,6 +204,7 @@ config X86
+@@ -205,6 +205,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP