Diffstat (limited to 'patches/0001_cpu_pm_make_notifier_chain_use_a_raw_spinlock_t.patch')
-rw-r--r--  patches/0001_cpu_pm_make_notifier_chain_use_a_raw_spinlock_t.patch | 121
1 file changed, 121 insertions, 0 deletions
diff --git a/patches/0001_cpu_pm_make_notifier_chain_use_a_raw_spinlock_t.patch b/patches/0001_cpu_pm_make_notifier_chain_use_a_raw_spinlock_t.patch
new file mode 100644
index 000000000000..2d3f77768638
--- /dev/null
+++ b/patches/0001_cpu_pm_make_notifier_chain_use_a_raw_spinlock_t.patch
@@ -0,0 +1,121 @@
+From: Valentin Schneider <valentin.schneider@arm.com>
+Subject: cpu_pm: Make notifier chain use a raw_spinlock_t
+Date: Wed, 11 Aug 2021 21:14:31 +0100
+
+Invoking atomic_notifier_chain_notify() requires acquiring a spinlock_t,
+which can block under CONFIG_PREEMPT_RT. Notifications for members of the
+cpu_pm notification chain will be issued by the idle task, which can never
+block.
+
+Making *all* atomic_notifiers use a raw_spinlock is too big of a hammer, as
+only notifications issued by the idle task are problematic.
+
+Special-case cpu_pm_notifier_chain by kludging a raw_notifier and
+raw_spinlock_t together, matching the atomic_notifier behavior with a
+raw_spinlock_t.
+
+Fixes: 70d932985757 ("notifier: Fix broken error handling pattern")
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210811201432.1976916-2-valentin.schneider@arm.com
+---
+ kernel/cpu_pm.c | 50 ++++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 38 insertions(+), 12 deletions(-)
+
+--- a/kernel/cpu_pm.c
++++ b/kernel/cpu_pm.c
+@@ -13,19 +13,32 @@
+ #include <linux/spinlock.h>
+ #include <linux/syscore_ops.h>
+
+-static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
++/*
++ * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
++ * Notifications for cpu_pm will be issued by the idle task itself, which can
++ * never block, IOW it requires using a raw_spinlock_t.
++ */
++static struct {
++ struct raw_notifier_head chain;
++ raw_spinlock_t lock;
++} cpu_pm_notifier = {
++ .chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
++};
+
+ static int cpu_pm_notify(enum cpu_pm_event event)
+ {
+ int ret;
+
+ /*
+- * atomic_notifier_call_chain has a RCU read critical section, which
+- * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
+- * RCU know this.
++ * This introduces a RCU read critical section, which could be
++ * disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
++ * this.
+ */
+ rcu_irq_enter_irqson();
+- ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
++ rcu_read_lock();
++ ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
++ rcu_read_unlock();
+ rcu_irq_exit_irqson();
+
+ return notifier_to_errno(ret);
+@@ -33,10 +46,13 @@ static int cpu_pm_notify(enum cpu_pm_eve
+
+ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
+ {
++ unsigned long flags;
+ int ret;
+
+ rcu_irq_enter_irqson();
+- ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
++ ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
+ rcu_irq_exit_irqson();
+
+ return notifier_to_errno(ret);
+@@ -49,12 +65,17 @@ static int cpu_pm_notify_robust(enum cpu
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+- * This function may sleep, and has the same return conditions as
+- * raw_notifier_chain_register.
++ * This function has the same return conditions as raw_notifier_chain_register.
+ */
+ int cpu_pm_register_notifier(struct notifier_block *nb)
+ {
+- return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
++ unsigned long flags;
++ int ret;
++
++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
++ ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+@@ -64,12 +85,17 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifi
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+- * This function may sleep, and has the same return conditions as
+- * raw_notifier_chain_unregister.
++ * This function has the same return conditions as raw_notifier_chain_unregister.
+ */
+ int cpu_pm_unregister_notifier(struct notifier_block *nb)
+ {
+- return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
++ unsigned long flags;
++ int ret;
++
++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
++ ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
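
Not part of the patch above: a minimal, hypothetical driver-side sketch of how the (unchanged) cpu_pm notifier API is consumed, included here only to illustrate the constraint the patch addresses. The callback is invoked from the idle task with interrupts disabled, so it must never sleep or take a lock that sleeps under PREEMPT_RT (i.e. a spinlock_t), which is why the chain itself is now protected by a raw_spinlock_t. The example_* names are invented for illustration; only cpu_pm_register_notifier()/cpu_pm_unregister_notifier(), the CPU_PM_* events and the notifier_block interface come from the real API.

/* Hypothetical example, not part of the patch. */
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Save per-CPU hardware state; runs in idle, no blocking calls here. */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Restore per-CPU hardware state. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

static int __init example_init(void)
{
	/* Registration may be called from blocking context; it takes
	 * cpu_pm_notifier.lock with IRQs disabled only briefly. */
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}

static void __exit example_exit(void)
{
	cpu_pm_unregister_notifier(&example_cpu_pm_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");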