author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-07-11 18:05:00 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-07-11 18:05:00 +0200
commit     66fd4012d22f2f69d71b619111485d64bda99513 (patch)
tree       644a388307458dac4d7c907bda1ab40bfb69dcea /patches/series
parent     2b9779a5b8e7553fc13e79aa4e68912447abdc82 (diff)
download   linux-rt-66fd4012d22f2f69d71b619111485d64bda99513.tar.gz
[ANNOUNCE] v4.11.9-rt7 (tag: v4.11.9-rt7-patches)
Dear RT folks!

I'm pleased to announce the v4.11.9-rt7 patch set.

Changes since v4.11.9-rt6:

  - Alex Shi fixed a "scheduling while atomic" bug on arm64 in the CPU
    idle code.

  - Vikram Mulukutla reported a problem where a parked CPU-hotplug
    thread was still on the runqueue. Patched by Thomas Gleixner.

Known issues
  - CPU hotplug got a little better but can deadlock.

The delta patch against v4.11.9-rt6 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/incr/patch-4.11.9-rt6-rt7.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.9-rt7

The RT patch against v4.11.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.9-rt7.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.9-rt7.tar.xz

Sebastian

diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -627,13 +627,25 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
+static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
 
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
+	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 	wait_for_completion(&st->done);
+	BUG_ON(!cpu_online(cpu));
+
+	/* Unpark the stopper thread and the hotplug thread of the target cpu */
+	stop_machine_unpark(cpu);
+	kthread_unpark(st->thread);
+
+	/* Should we go further up ? */
+	if (st->target > CPUHP_AP_ONLINE_IDLE) {
+		__cpuhp_kick_ap_work(st);
+		wait_for_completion(&st->done);
+	}
 	return st->result;
 }
 
@@ -654,9 +666,7 @@ static int bringup_cpu(unsigned int cpu)
 	irq_unlock_sparse();
 	if (ret)
 		return ret;
-	ret = bringup_wait_for_ap(cpu);
-	BUG_ON(!cpu_online(cpu));
-	return ret;
+	return bringup_wait_for_ap(cpu);
 }
 
 /*
@@ -1181,31 +1191,20 @@ void notify_cpu_starting(unsigned int cpu)
 }
 
 /*
- * Called from the idle task. We need to set active here, so we can kick off
- * the stopper thread and unpark the smpboot threads. If the target state is
- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
- * cpu further.
+ * Called from the idle task. Wake up the controlling task which brings the
+ * stopper and the hotplug thread of the upcoming CPU up and then delegates
+ * the rest of the online bringup to the hotplug thread.
  */
 void cpuhp_online_idle(enum cpuhp_state state)
 {
 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
-	unsigned int cpu = smp_processor_id();
 
 	/* Happens for the boot cpu */
 	if (state != CPUHP_AP_ONLINE_IDLE)
 		return;
 
 	st->state = CPUHP_AP_ONLINE_IDLE;
-
-	/* Unpark the stopper thread and the hotplug thread of this cpu */
-	stop_machine_unpark(cpu);
-	kthread_unpark(st->thread);
-
-	/* Should we go further up ? */
-	if (st->target > CPUHP_AP_ONLINE_IDLE)
-		__cpuhp_kick_ap_work(st);
-	else
-		complete(&st->done);
+	complete(&st->done);
 }
 
 /* Requires cpu_add_remove_lock to be held */
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -22,14 +22,13 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
-static DEFINE_RWLOCK(cpu_pm_notifier_lock);
-static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
 static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
 {
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+	ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
 		nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
@@ -47,14 +46,7 @@ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
  */
 int cpu_pm_register_notifier(struct notifier_block *nb)
 {
-	unsigned long flags;
-	int ret;
-
-	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-	ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
-	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-
-	return ret;
+	return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
 
@@ -69,14 +61,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
  */
 int cpu_pm_unregister_notifier(struct notifier_block *nb)
 {
-	unsigned long flags;
-	int ret;
-
-	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-	ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
-	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-
-	return ret;
+	return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
 
@@ -100,7 +85,6 @@ int cpu_pm_enter(void)
 	int nr_calls;
 	int ret = 0;
 
-	read_lock(&cpu_pm_notifier_lock);
 	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
 	if (ret)
 		/*
@@ -108,7 +92,6 @@ int cpu_pm_enter(void)
 		 * PM entry who are notified earlier to prepare for it.
 		 */
 		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
 }
@@ -128,13 +111,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
  */
 int cpu_pm_exit(void)
 {
-	int ret;
-
-	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
-
-	return ret;
+	return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
 
@@ -159,7 +136,6 @@ int cpu_cluster_pm_enter(void)
 	int nr_calls;
 	int ret = 0;
 
-	read_lock(&cpu_pm_notifier_lock);
 	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
 	if (ret)
 		/*
@@ -167,7 +143,6 @@ int cpu_cluster_pm_enter(void)
 		 * PM entry who are notified earlier to prepare for it.
 		 */
 		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
 }
@@ -190,13 +165,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  */
 int cpu_cluster_pm_exit(void)
 {
-	int ret;
-
-	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
-
-	return ret;
+	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
 }
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt6
+-rt7

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
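Background on the hotplug fix above: per-CPU threads such as the stopper and the cpuhp thread are "parked" rather than stopped, and a parked thread must stay off the runqueue until a controlling task unparks it. The diff moves stop_machine_unpark()/kthread_unpark() out of the idle task and into bringup_wait_for_ap() so that invariant holds during bringup. The following is a minimal sketch of the park/unpark contract itself, with hypothetical names (park_demo_fn, park_demo_thread); it is not the kernel's smpboot code:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Hypothetical worker honoring the park protocol: while parked it
 * sleeps inside kthread_parkme() and must not show up on any runqueue. */
static int park_demo_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* blocks until kthread_unpark() */
		schedule_timeout_interruptible(HZ);	/* stand-in for real work */
	}
	return 0;
}

static struct task_struct *park_demo_thread;

static int __init park_demo_init(void)
{
	park_demo_thread = kthread_run(park_demo_fn, NULL, "park_demo");
	if (IS_ERR(park_demo_thread))
		return PTR_ERR(park_demo_thread);

	kthread_park(park_demo_thread);		/* returns once the thread is parked */
	kthread_unpark(park_demo_thread);	/* lets it resume its loop */
	return 0;
}

static void __exit park_demo_exit(void)
{
	kthread_stop(park_demo_thread);
}

module_init(park_demo_init);
module_exit(park_demo_exit);
MODULE_LICENSE("GPL");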
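The cpu_pm fix works the other way around: instead of making the caller sleepable, it makes the notifier chain safe to run from atomic context. On PREEMPT_RT a rwlock's read_lock() is a sleeping lock, which is what produced the "scheduling while atomic" splat in the arm64 idle path; an ATOMIC_NOTIFIER_HEAD walks its chain under rcu_read_lock() instead, so its callbacks must not sleep. A hedged sketch of how a driver typically hooks into this chain (the mydrv_* names are hypothetical):

#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Hypothetical callback: after the atomic-notifier conversion it runs
 * under rcu_read_lock() on the idle path, so it must not sleep or take
 * sleeping locks. */
static int mydrv_cpu_pm_notify(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	switch (event) {
	case CPU_PM_ENTER:
		/* save per-CPU hardware context before the low-power state */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore the context on the way back out */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mydrv_cpu_pm_nb = {
	.notifier_call = mydrv_cpu_pm_notify,
};

static int __init mydrv_init(void)
{
	return cpu_pm_register_notifier(&mydrv_cpu_pm_nb);
}

static void __exit mydrv_exit(void)
{
	cpu_pm_unregister_notifier(&mydrv_cpu_pm_nb);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");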
Diffstat (limited to 'patches/series')
-rw-r--r--  patches/series  2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/patches/series b/patches/series
index 8547a3f47cda..fcfffa8bc661 100644
--- a/patches/series
+++ b/patches/series
@@ -47,6 +47,7 @@ lockdep-Fix-per-cpu-static-objects.patch
 futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
 arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
+smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
 
 ###
 # get_online_cpus() rework.
@@ -138,6 +139,7 @@ iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
 iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
 rxrpc-remove-unused-static-variables.patch
 mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
+cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
 
 # Wants a different fix for upstream
 NFSv4-replace-seqcount_t-with-a-seqlock_t.patch