author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-07-11 18:05:00 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-07-11 18:05:00 +0200
commit     66fd4012d22f2f69d71b619111485d64bda99513 (patch)
tree       644a388307458dac4d7c907bda1ab40bfb69dcea
parent     2b9779a5b8e7553fc13e79aa4e68912447abdc82 (diff)
download   linux-rt-66fd4012d22f2f69d71b619111485d64bda99513.tar.gz

[ANNOUNCE] v4.11.9-rt7  (v4.11.9-rt7-patches)
Dear RT folks!

I'm pleased to announce the v4.11.9-rt7 patch set.

Changes since v4.11.9-rt6:

  - Alex Shi fixed a "scheduling while atomic" bug on arm64 in the CPU
    idle code.

  - Vikram Mulukutla reported a problem where a parked CPU-hotplug
    thread was still on the runqueue. Patched by Thomas Gleixner.

Known issues
  - CPU hotplug got a little better but can deadlock.

The delta patch against v4.11.9-rt6 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/incr/patch-4.11.9-rt6-rt7.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.9-rt7

The RT patch against v4.11.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.9-rt7.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.9-rt7.tar.xz

Sebastian

diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -627,13 +627,25 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
+static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
 
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
+	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 	wait_for_completion(&st->done);
+	BUG_ON(!cpu_online(cpu));
+
+	/* Unpark the stopper thread and the hotplug thread of the target cpu */
+	stop_machine_unpark(cpu);
+	kthread_unpark(st->thread);
+
+	/* Should we go further up ? */
+	if (st->target > CPUHP_AP_ONLINE_IDLE) {
+		__cpuhp_kick_ap_work(st);
+		wait_for_completion(&st->done);
+	}
 	return st->result;
 }
 
@@ -654,9 +666,7 @@ static int bringup_cpu(unsigned int cpu)
 	irq_unlock_sparse();
 	if (ret)
 		return ret;
-	ret = bringup_wait_for_ap(cpu);
-	BUG_ON(!cpu_online(cpu));
-	return ret;
+	return bringup_wait_for_ap(cpu);
 }
 
 /*
@@ -1181,31 +1191,20 @@ void notify_cpu_starting(unsigned int cpu)
 }
 
 /*
- * Called from the idle task. We need to set active here, so we can kick off
- * the stopper thread and unpark the smpboot threads. If the target state is
- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
- * cpu further.
+ * Called from the idle task. Wake up the controlling task which brings the
+ * stopper and the hotplug thread of the upcoming CPU up and then delegates
+ * the rest of the online bringup to the hotplug thread.
  */
 void cpuhp_online_idle(enum cpuhp_state state)
 {
 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
-	unsigned int cpu = smp_processor_id();
 
 	/* Happens for the boot cpu */
 	if (state != CPUHP_AP_ONLINE_IDLE)
 		return;
 
 	st->state = CPUHP_AP_ONLINE_IDLE;
-
-	/* Unpark the stopper thread and the hotplug thread of this cpu */
-	stop_machine_unpark(cpu);
-	kthread_unpark(st->thread);
-
-	/* Should we go further up ? */
-	if (st->target > CPUHP_AP_ONLINE_IDLE)
-		__cpuhp_kick_ap_work(st);
-	else
-		complete(&st->done);
+	complete(&st->done);
 }
 
 /* Requires cpu_add_remove_lock to be held */
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -22,14 +22,13 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
-static DEFINE_RWLOCK(cpu_pm_notifier_lock);
-static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
 static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
 {
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+	ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
 		nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
@@ -47,14 +46,7 @@ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
  */
 int cpu_pm_register_notifier(struct notifier_block *nb)
 {
-	unsigned long flags;
-	int ret;
-
-	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-	ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
-	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-
-	return ret;
+	return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
 
@@ -69,14 +61,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
  */
 int cpu_pm_unregister_notifier(struct notifier_block *nb)
 {
-	unsigned long flags;
-	int ret;
-
-	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-	ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
-	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-
-	return ret;
+	return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
 
@@ -100,7 +85,6 @@ int cpu_pm_enter(void)
 	int nr_calls;
 	int ret = 0;
 
-	read_lock(&cpu_pm_notifier_lock);
 	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
 	if (ret)
 		/*
@@ -108,7 +92,6 @@ int cpu_pm_enter(void)
 		 * PM entry who are notified earlier to prepare for it.
 		 */
 		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
 }
@@ -128,13 +111,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
  */
 int cpu_pm_exit(void)
 {
-	int ret;
-
-	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
-
-	return ret;
+	return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
 
@@ -159,7 +136,6 @@ int cpu_cluster_pm_enter(void)
 	int nr_calls;
 	int ret = 0;
 
-	read_lock(&cpu_pm_notifier_lock);
 	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
 	if (ret)
 		/*
@@ -167,7 +143,6 @@ int cpu_cluster_pm_enter(void)
 		 * PM entry who are notified earlier to prepare for it.
 		 */
 		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
 }
@@ -190,13 +165,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  */
 int cpu_cluster_pm_exit(void)
 {
-	int ret;
-
-	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
-	read_unlock(&cpu_pm_notifier_lock);
-
-	return ret;
+	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
 }
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
 
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt6
+-rt7

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
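Note on the cpu_pm change above: the registration API is unchanged for users, only the context in which the callbacks run became stricter, since __atomic_notifier_call_chain() walks the chain under rcu_read_lock() instead of a sleeping rwlock. A minimal sketch (not part of this patch set; the example_* names are hypothetical) of a CPU PM notifier as it would look against the now-atomic chain:

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/*
 * Hypothetical callback: with the atomic chain it is invoked from the idle
 * path with interrupts disabled, so it must not sleep or take sleeping locks.
 */
static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* About to enter a low-power state: save per-CPU HW context. */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Low-power entry aborted or finished: restore HW context. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

static int __init example_cpu_pm_init(void)
{
	/* Now boils down to atomic_notifier_chain_register() underneath. */
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}
core_initcall(example_cpu_pm_init);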
-rw-r--r--  patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch                   4
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch                                      2
-rw-r--r--  patches/cpu_down_move_migrate_enable_back.patch                           4
-rw-r--r--  patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch            172
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch                               6
-rw-r--r--  patches/hotplug-use-migrate-disable.patch                                 4
-rw-r--r--  patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch         2
-rw-r--r--  patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch         8
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch        2
-rw-r--r--  patches/localversion.patch                                                2
-rw-r--r--  patches/mm-vmalloc-use-get-cpu-light.patch                               10
-rw-r--r--  patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch        4
-rw-r--r--  patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch          2
-rw-r--r--  patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch        2
-rw-r--r--  patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch        2
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch          8
-rw-r--r--  patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch        2
-rw-r--r--  patches/ping-sysrq.patch                                                  4
-rw-r--r--  patches/series                                                            2
-rw-r--r--  patches/skbufhead-raw-lock.patch                                         16
-rw-r--r--  patches/smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch       157
-rw-r--r--  patches/softirq-disable-softirq-stacks-for-rt.patch                        4
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch                                   14
-rw-r--r--  patches/softirq-split-locks.patch                                         2
-rw-r--r--  patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch     4
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch                            2
-rw-r--r--  patches/x86-preempt-lazy.patch                                            4
27 files changed, 388 insertions, 57 deletions
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 72457678877d..afa9e17fc14b 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2607,7 +2607,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2608,7 +2608,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2645,7 +2645,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2646,7 +2646,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 80e93f2b8f9a..522e8389d299 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -426,7 +426,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_lock_release();
}
-@@ -806,6 +957,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -816,6 +967,9 @@ static int takedown_cpu(unsigned int cpu
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index 8846874a132e..8fd55e52cd89 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,7 +34,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1084,6 +1084,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1094,6 +1094,7 @@ static int __ref _cpu_down(unsigned int
goto restore_cpus;
}
@@ -42,7 +42,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
if (ret) {
-@@ -1130,7 +1131,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1140,7 +1141,6 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
diff --git a/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch b/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
new file mode 100644
index 000000000000..f24636b8b1f3
--- /dev/null
+++ b/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
@@ -0,0 +1,172 @@
+From: Alex Shi <alex.shi@linaro.org>
+Date: Thu, 6 Jul 2017 16:47:46 +0800
+Subject: [PATCH] cpu_pm: replace raw_notifier to atomic_notifier
+
+This patch replaces the rwlock and the raw notifier with an atomic
+notifier, which is protected by a spinlock and RCU.
+
+The first reason for this replacement is a 'scheduling while
+atomic' bug in the RT kernel on arm/arm64 platforms. On arm/arm64, the
+rwlock cpu_pm_notifier_lock in cpu_pm causes a potential schedule after
+interrupts are disabled in the idle call chain:
+
+cpu_startup_entry
+ cpu_idle_loop
+ local_irq_disable()
+ cpuidle_idle_call
+ call_cpuidle
+ cpuidle_enter
+ cpuidle_enter_state
+ ->enter :arm_enter_idle_state
+ cpu_pm_enter/exit
+ CPU_PM_CPU_IDLE_ENTER
+ read_lock(&cpu_pm_notifier_lock); <-- sleep in idle
+ __rt_spin_lock();
+ schedule();
+
+The kernel panic is here:
+[ 4.609601] BUG: scheduling while atomic: swapper/1/0/0x00000002
+[ 4.609608] [<ffff0000086fae70>] arm_enter_idle_state+0x18/0x70
+[ 4.609614] Modules linked in:
+[ 4.609615] [<ffff0000086f9298>] cpuidle_enter_state+0xf0/0x218
+[ 4.609620] [<ffff0000086f93f8>] cpuidle_enter+0x18/0x20
+[ 4.609626] Preemption disabled at:
+[ 4.609627] [<ffff0000080fa234>] call_cpuidle+0x24/0x40
+[ 4.609635] [<ffff000008882fa4>] schedule_preempt_disabled+0x1c/0x28
+[ 4.609639] [<ffff0000080fa49c>] cpu_startup_entry+0x154/0x1f8
+[ 4.609645] [<ffff00000808e004>] secondary_start_kernel+0x15c/0x1a0
+
+Daniel Lezcano said this notification is needed on arm/arm64 platforms.
+Sebastian suggested using an atomic_notifier instead of the rwlock,
+which not only removes the sleeping in the idle path, but also brings a
+latency improvement.
+
+This patch passed Fengguang's 0day testing.
+
+Signed-off-by: Alex Shi <alex.shi@linaro.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Anders Roxell <anders.roxell@linaro.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
+Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu_pm.c | 43 ++++++-------------------------------------
+ 1 file changed, 6 insertions(+), 37 deletions(-)
+
+--- a/kernel/cpu_pm.c
++++ b/kernel/cpu_pm.c
+@@ -22,14 +22,13 @@
+ #include <linux/spinlock.h>
+ #include <linux/syscore_ops.h>
+
+-static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+-static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
++static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+ {
+ int ret;
+
+- ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
++ ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+@@ -47,14 +46,7 @@ static int cpu_pm_notify(enum cpu_pm_eve
+ */
+ int cpu_pm_register_notifier(struct notifier_block *nb)
+ {
+- unsigned long flags;
+- int ret;
+-
+- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+- ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+-
+- return ret;
++ return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+@@ -69,14 +61,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifi
+ */
+ int cpu_pm_unregister_notifier(struct notifier_block *nb)
+ {
+- unsigned long flags;
+- int ret;
+-
+- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+- ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+-
+- return ret;
++ return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+@@ -100,7 +85,6 @@ int cpu_pm_enter(void)
+ int nr_calls;
+ int ret = 0;
+
+- read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+@@ -108,7 +92,6 @@ int cpu_pm_enter(void)
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+ }
+@@ -128,13 +111,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
+ */
+ int cpu_pm_exit(void)
+ {
+- int ret;
+-
+- read_lock(&cpu_pm_notifier_lock);
+- ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+-
+- return ret;
++ return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+@@ -159,7 +136,6 @@ int cpu_cluster_pm_enter(void)
+ int nr_calls;
+ int ret = 0;
+
+- read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+@@ -167,7 +143,6 @@ int cpu_cluster_pm_enter(void)
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+ }
+@@ -190,13 +165,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+ */
+ int cpu_cluster_pm_exit(void)
+ {
+- int ret;
+-
+- read_lock(&cpu_pm_notifier_lock);
+- ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+-
+- return ret;
++ return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ }
+ EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 778db0d943df..37bbe43a1b50 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void get_online_cpus(void)
{
-@@ -766,6 +860,8 @@ static int __ref _cpu_down(unsigned int
+@@ -776,6 +870,8 @@ static int __ref _cpu_down(unsigned int
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -773,7 +869,27 @@ static int __ref _cpu_down(unsigned int
+@@ -783,7 +879,27 @@ static int __ref _cpu_down(unsigned int
if (!cpu_present(cpu))
return -EINVAL;
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_tasks_frozen = tasks_frozen;
-@@ -811,6 +927,8 @@ static int __ref _cpu_down(unsigned int
+@@ -821,6 +937,8 @@ static int __ref _cpu_down(unsigned int
}
out:
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index 501094419116..4cb9ba5068bf 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -877,14 +877,13 @@ static int __ref _cpu_down(unsigned int
+@@ -887,14 +887,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
-@@ -932,6 +931,7 @@ static int __ref _cpu_down(unsigned int
+@@ -942,6 +941,7 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index a415094d8443..24f8c9a2c1ee 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -961,6 +971,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -971,6 +981,7 @@ static int takedown_cpu(unsigned int cpu
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 0e5f75631782..f84ffbd6050b 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1055,6 +1055,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1065,6 +1065,7 @@ static int __ref _cpu_down(unsigned int
int prev_state, ret = 0;
int mycpu;
cpumask_var_t cpumask;
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -1065,6 +1066,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1075,6 +1076,12 @@ static int __ref _cpu_down(unsigned int
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
-@@ -1073,7 +1080,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1083,7 +1090,8 @@ static int __ref _cpu_down(unsigned int
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_hotplug_begin();
-@@ -1123,6 +1131,9 @@ static int __ref _cpu_down(unsigned int
+@@ -1133,6 +1141,9 @@ static int __ref _cpu_down(unsigned int
out_cancel:
cpu_hotplug_done();
migrate_enable();
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 99c69f5ad371..3a413d64b154 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -1191,12 +1191,12 @@ static void mt_ase_fp_affinity(void)
+@@ -1193,12 +1193,12 @@ static void mt_ase_fp_affinity(void)
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4c1841b6475d..bbb08330835d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt7
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index 16c743428f61..38f2ce4b8a3b 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int
+@@ -866,7 +866,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int
+@@ -909,11 +909,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size
+@@ -982,6 +983,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size
+@@ -996,7 +998,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size
+@@ -1019,7 +1022,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 3a1979885423..cc3afdd73fb8 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4961,6 +4961,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -4962,6 +4962,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -4972,6 +4973,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4973,6 +4974,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 2084dbdb06ff..0440e769effe 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8093,7 +8093,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8098,7 +8098,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
index 553ab5e14cbf..6abe3ed1c459 100644
--- a/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
+++ b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5059,8 +5059,6 @@ static void busy_poll_stop(struct napi_s
+@@ -5060,8 +5060,6 @@ static void busy_poll_stop(struct napi_s
if (rc == BUSY_POLL_BUDGET)
__napi_schedule(napi);
local_bh_enable();
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 46e8b749e597..2081b0b4bc76 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3077,7 +3077,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3078,7 +3078,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 30d4f1f47b2f..7ef11e59318f 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2427,14 +2427,53 @@ void netdev_freemem(struct net_device *d
+@@ -2428,14 +2428,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3140,8 +3140,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3141,8 +3141,10 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* dev_loopback_xmit - loop back @skb
-@@ -3381,8 +3383,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3382,8 +3384,7 @@ static int __dev_queue_xmit(struct sk_bu
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3392,9 +3393,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3393,9 +3394,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index e2d3b322dad6..b14654289847 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5367,7 +5367,7 @@ static __latent_entropy void net_rx_acti
+@@ -5368,7 +5368,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index de3a89ac5491..f14a905f6d0b 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -59,7 +59,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
-@@ -927,6 +928,30 @@ static bool icmp_redirect(struct sk_buff
+@@ -931,6 +932,30 @@ static bool icmp_redirect(struct sk_buff
}
/*
@@ -90,7 +90,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -953,6 +978,11 @@ static bool icmp_echo(struct sk_buff *sk
+@@ -957,6 +982,11 @@ static bool icmp_echo(struct sk_buff *sk
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
diff --git a/patches/series b/patches/series
index 8547a3f47cda..fcfffa8bc661 100644
--- a/patches/series
+++ b/patches/series
@@ -47,6 +47,7 @@ lockdep-Fix-per-cpu-static-objects.patch
futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
arm64-cpufeature-don-t-use-mutex-in-bringup-path.patch
+smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
###
# get_online_cpus() rework.
@@ -138,6 +139,7 @@ iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
rxrpc-remove-unused-static-variables.patch
mm-swap-don-t-disable-preemption-while-taking-the-pe.patch
+cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
# Wants a different fix for upstream
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 824c55abac37..924c5080e2fc 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2767,6 +2767,7 @@ struct softnet_data {
+@@ -2768,6 +2768,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4318,7 +4318,7 @@ static void flush_backlog(struct work_st
+@@ -4319,7 +4319,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4328,11 +4328,14 @@ static void flush_backlog(struct work_st
+@@ -4329,11 +4329,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -4866,7 +4869,9 @@ static int process_backlog(struct napi_s
+@@ -4867,7 +4870,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -4874,9 +4879,9 @@ static int process_backlog(struct napi_s
+@@ -4875,9 +4880,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5317,13 +5322,21 @@ static __latent_entropy void net_rx_acti
+@@ -5318,13 +5323,21 @@ static __latent_entropy void net_rx_acti
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8084,6 +8097,9 @@ static int dev_cpu_dead(unsigned int old
+@@ -8089,6 +8102,9 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -8387,8 +8403,9 @@ static int __init net_dev_init(void)
+@@ -8392,8 +8408,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch b/patches/smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
new file mode 100644
index 000000000000..b60a80420803
--- /dev/null
+++ b/patches/smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
@@ -0,0 +1,157 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 6 Jul 2017 01:57:55 -0700
+Subject: [PATCH] smp/hotplug: Move unparking of percpu threads to the control
+ CPU
+
+Upstream commit 9cd4f1a4e7a858849e889a081a99adff83e08e4c
+
+Vikram reported the following backtrace:
+
+ BUG: scheduling while atomic: swapper/7/0/0x00000002
+ CPU: 7 PID: 0 Comm: swapper/7 Not tainted 4.9.32-perf+ #680
+ schedule
+ schedule_hrtimeout_range_clock
+ schedule_hrtimeout
+ wait_task_inactive
+ __kthread_bind_mask
+ __kthread_bind
+ __kthread_unpark
+ kthread_unpark
+ cpuhp_online_idle
+ cpu_startup_entry
+ secondary_start_kernel
+
+He analyzed correctly that a parked cpu hotplug thread of an offlined CPU
+was still on the runqueue when the CPU came back online and tried to unpark
+it. This causes the thread which invoked kthread_unpark() to call
+wait_task_inactive() and subsequently schedule() with preemption disabled.
+His proposed workaround was to "make sure" that a parked thread has
+scheduled out when the CPU goes offline, so the situation cannot happen.
+
+But that's still wrong because the root cause is not the fact that the
+percpu thread is still on the runqueue and neither that preemption is
+disabled, which could be simply solved by enabling preemption before
+calling kthread_unpark().
+
+The real issue is that the calling thread is the idle task of the upcoming
+CPU, which is not supposed to call anything which might sleep. The moron,
+who wrote that code, missed completely that kthread_unpark() might end up
+in schedule().
+
+The solution is simpler than expected. The thread which controls the
+hotplug operation is waiting for the CPU to call complete() on the hotplug
+state completion. So the idle task of the upcoming CPU can set its state to
+CPUHP_AP_ONLINE_IDLE and invoke complete(). This in turn wakes the control
+task on a different CPU, which then can safely do the unpark and kick the
+now unparked hotplug thread of the upcoming CPU to complete the bringup to
+the final target state.
+
+Control CPU AP
+
+bringup_cpu();
+ __cpu_up() ------------>
+ bringup_ap();
+ bringup_wait_for_ap()
+ wait_for_completion();
+ cpuhp_online_idle();
+ <------------ complete();
+ unpark(AP->stopper);
+ unpark(AP->hotplugthread);
+ while(1)
+ do_idle();
+ kick(AP->hotplugthread);
+ wait_for_completion(); hotplug_thread()
+ run_online_callbacks();
+ complete();
+
+Fixes: 8df3e07e7f21 ("cpu/hotplug: Let upcoming cpu bring itself fully up")
+Reported-by: Vikram Mulukutla <markivx@codeaurora.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Sewior <bigeasy@linutronix.de>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1707042218020.2131@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -344,13 +344,25 @@ void cpu_hotplug_enable(void)
+ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+-/* Notifier wrappers for transitioning to state machine */
++static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
+
+ static int bringup_wait_for_ap(unsigned int cpu)
+ {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
++ /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
+ wait_for_completion(&st->done);
++ BUG_ON(!cpu_online(cpu));
++
++ /* Unpark the stopper thread and the hotplug thread of the target cpu */
++ stop_machine_unpark(cpu);
++ kthread_unpark(st->thread);
++
++ /* Should we go further up ? */
++ if (st->target > CPUHP_AP_ONLINE_IDLE) {
++ __cpuhp_kick_ap_work(st);
++ wait_for_completion(&st->done);
++ }
+ return st->result;
+ }
+
+@@ -371,9 +383,7 @@ static int bringup_cpu(unsigned int cpu)
+ irq_unlock_sparse();
+ if (ret)
+ return ret;
+- ret = bringup_wait_for_ap(cpu);
+- BUG_ON(!cpu_online(cpu));
+- return ret;
++ return bringup_wait_for_ap(cpu);
+ }
+
+ /*
+@@ -859,31 +869,20 @@ void notify_cpu_starting(unsigned int cp
+ }
+
+ /*
+- * Called from the idle task. We need to set active here, so we can kick off
+- * the stopper thread and unpark the smpboot threads. If the target state is
+- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
+- * cpu further.
++ * Called from the idle task. Wake up the controlling task which brings the
++ * stopper and the hotplug thread of the upcoming CPU up and then delegates
++ * the rest of the online bringup to the hotplug thread.
+ */
+ void cpuhp_online_idle(enum cpuhp_state state)
+ {
+ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+- unsigned int cpu = smp_processor_id();
+
+ /* Happens for the boot cpu */
+ if (state != CPUHP_AP_ONLINE_IDLE)
+ return;
+
+ st->state = CPUHP_AP_ONLINE_IDLE;
+-
+- /* Unpark the stopper thread and the hotplug thread of this cpu */
+- stop_machine_unpark(cpu);
+- kthread_unpark(st->thread);
+-
+- /* Should we go further up ? */
+- if (st->target > CPUHP_AP_ONLINE_IDLE)
+- __cpuhp_kick_ap_work(st);
+- else
+- complete(&st->done);
++ complete(&st->done);
+ }
+
+ /* Requires cpu_add_remove_lock to be held */
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index c79b47661028..d8de0279dec1 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -892,6 +892,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -889,6 +889,7 @@ EXPORT_SYMBOL(native_load_gs_index)
jmp 2b
.previous
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -904,6 +905,7 @@ ENTRY(do_softirq_own_stack)
+@@ -901,6 +902,7 @@ ENTRY(do_softirq_own_stack)
decl PER_CPU_VAR(irq_count)
ret
END(do_softirq_own_stack)
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 15a4ff830ee1..82420d058513 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2403,6 +2403,7 @@ static void __netif_reschedule(struct Qd
+@@ -2404,6 +2404,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2465,6 +2466,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2466,6 +2467,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3772,6 +3774,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3773,6 +3775,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4821,6 +4824,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4822,6 +4825,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4834,6 +4838,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4835,6 +4839,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4911,6 +4916,7 @@ void __napi_schedule(struct napi_struct
+@@ -4912,6 +4917,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8069,6 +8075,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8074,6 +8080,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index c36441c5af8c..84be7bf11d2b 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -816,7 +816,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3844,11 +3844,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3845,11 +3845,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 36dced060c05..c29771a0373f 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3798,7 +3798,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3799,7 +3799,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3808,14 +3808,14 @@ static int netif_rx_internal(struct sk_b
+@@ -3809,14 +3809,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index acfa6b0ca035..916812dca44c 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6107,6 +6107,13 @@ int kvm_arch_init(void *opaque)
+@@ -6118,6 +6118,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 789cd45e6075..de5b8740b309 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -332,8 +332,25 @@ END(ret_from_exception)
+@@ -340,8 +340,25 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -544,7 +544,23 @@ GLOBAL(retint_user)
+@@ -541,7 +541,23 @@ GLOBAL(retint_user)
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)