author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2018-05-29 17:34:09 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2018-05-29 17:34:09 +0200
commit    0eab90fb699a3900b9e1bc3fd3f0d0b325534789 (patch)
tree      cb6fada99940ba852bbe24f9eec459eecc351d75
parent    bfdc181c0c9bd6c8c55cd44563db24aa7bc7bb1e (diff)
download  linux-rt-4.16.12-rt5-patches.tar.gz

[ANNOUNCE] v4.16.12-rt5 (tag: v4.16.12-rt5-patches)
Dear RT folks!

I'm pleased to announce the v4.16.12-rt5 patch set.

Changes since v4.16.12-rt4:

  - Update the "suspend prevent might sleep splat" patch. The newer
    version also supports s2idle.

  - The seqlock implementation had a missing memory barrier. Patch by
    Julia Cartwright (see the read-side sketch after this
    announcement).

  - The new priority reported by trace_sched_pi_setprio() was wrong in
    case the task was de-boosted. Reported by Christian Mansky.

  - Update of the refcount_t queue: The raid5 patch was replaced with
    the atomic interface because it does not fit the refcount_t API.

  - Since the last release softirq_count() returns the "BH disable"
    count. In this release I am dropping the workarounds we had while
    softirq_count() always returned 0.

Known issues
  - A warning triggered in "rcu_note_context_switch" originated from
    SyS_timer_gettime(). The issue was always there, it is now
    visible. Reported by Grygorii Strashko and Daniel Wagner.

The delta patch against v4.16.12-rt4 is appended below and can be found
here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/incr/patch-4.16.12-rt4-rt5.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.16.12-rt5

The RT patch against v4.16.12 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patch-4.16.12-rt5.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patches-4.16.12-rt5.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
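For background on the seqlock item above: a seqlock reader is correct
only because of the barrier pairing in the classic retry loop, which is
what a missing barrier breaks. A minimal, illustrative read-side sketch
follows; the demo_* names are hypothetical and not taken from the patch
queue.

#include <linux/seqlock.h>

static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
static int demo_data;

/* Reader: retry until a stable snapshot is observed. The barriers
 * inside read_seqcount_begin()/read_seqcount_retry() keep the load of
 * demo_data from leaking outside the sequence checks. */
static int demo_read(void)
{
	unsigned int seq;
	int val;

	do {
		seq = read_seqcount_begin(&demo_seq);
		val = demo_data;
	} while (read_seqcount_retry(&demo_seq, seq));

	return val;
}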
-rw-r--r--patches/0001-PM-suspend-Prevent-might-sleep-splats.patch (renamed from patches/suspend-prevernt-might-sleep-splats.patch)48
-rw-r--r--patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch8
-rw-r--r--patches/0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch57
-rw-r--r--patches/0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch82
-rw-r--r--patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch (renamed from patches/0007-md-raid5-Use-irqsave-variant-of-refcount_dec_and_loc.patch)12
-rw-r--r--patches/0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch53
-rw-r--r--patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch (renamed from patches/0008-md-raid5-Do-not-disable-irq-on-release_inactive_stri.patch)5
-rw-r--r--patches/0003-md-raid5-use-refcount_t-for-reference-counting-inste.patch364
-rw-r--r--patches/0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch68
-rw-r--r--patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch2
-rw-r--r--patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch2
-rw-r--r--patches/add_migrate_disable.patch8
-rw-r--r--patches/completion-use-simple-wait-queues.patch12
-rw-r--r--patches/cond-resched-softirq-rt.patch6
-rw-r--r--patches/hotplug-light-get-online-cpus.patch8
-rw-r--r--patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch2
-rw-r--r--patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch36
-rw-r--r--patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch2
-rw-r--r--patches/kgb-serial-hackaround.patch2
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/lockdep-Add-a-assert_in_softirq.patch33
-rw-r--r--patches/lockdep-no-softirq-accounting-on-rt.patch23
-rw-r--r--patches/md-raid5-percpu-handling-rt-aware.patch4
-rw-r--r--patches/mm-memcontrol-do_not_disable_irq.patch14
-rw-r--r--patches/net-3com-3c59x-Move-boomerang-vortex-conditional-int.patch10
-rw-r--r--patches/net-3com-3c59x-Pull-locking-out-of-ISR.patch10
-rw-r--r--patches/net-3com-3c59x-irq-save-variant-of-ISR.patch4
-rw-r--r--patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch4
-rw-r--r--patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch2
-rw-r--r--patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch2
-rw-r--r--patches/net-mac808211-rc-warn_on.patch53
-rw-r--r--patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch2
-rw-r--r--patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch2
-rw-r--r--patches/preempt-lazy-support.patch22
-rw-r--r--patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch8
-rw-r--r--patches/radix-tree-use-local-locks.patch4
-rw-r--r--patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch6
-rw-r--r--patches/rtmutex-add-sleeping-lock-implementation.patch4
-rw-r--r--patches/rtmutex-annotate-sleeping-lock-context.patch2
-rw-r--r--patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch2
-rw-r--r--patches/sched-let-softirq_count-return-0-if-inside-local_bh_.patch1
-rw-r--r--patches/sched-limit-nr-migrate.patch2
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch2
-rw-r--r--patches/sched-mmdrop-delayed.patch8
-rw-r--r--patches/sched-rt-mutex-wakeup.patch6
-rw-r--r--patches/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch52
-rw-r--r--patches/sched-ttwu-ensure-success-return-is-correct.patch2
-rw-r--r--patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch2
-rw-r--r--patches/seqlock-provide-the-same-ordering-semantics-as-mainl.patch29
-rw-r--r--patches/series22
-rw-r--r--patches/skbufhead-raw-lock.patch14
-rw-r--r--patches/softirq-preempt-fix-3-re.patch6
-rw-r--r--patches/timers-prepare-for-full-preemption.patch4
-rw-r--r--patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch2
-rw-r--r--patches/workqueue-distangle-from-rq-lock.patch12
-rw-r--r--patches/workqueue-prevent-deadlock-stall.patch4
-rw-r--r--patches/x86-kvm-require-const-tsc-for-rt.patch2
-rw-r--r--patches/x86-preempt-lazy.patch6
58 files changed, 531 insertions, 635 deletions
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/0001-PM-suspend-Prevent-might-sleep-splats.patch
index 2b51807b3d55..0e410af325c2 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/0001-PM-suspend-Prevent-might-sleep-splats.patch
@@ -1,27 +1,35 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 15 Jul 2010 10:29:00 +0200
-Subject: suspend: Prevent might sleep splats
+Subject: [PATCH 1/4] PM / suspend: Prevent might sleep splats
-timekeeping suspend/resume calls read_persistant_clock() which takes
+timekeeping suspend/resume calls read_persistent_clock() which takes
rtc_lock. That results in might sleep warnings because at that point
we run with interrupts disabled.
We cannot convert rtc_lock to a raw spinlock as that would trigger
other might sleep warnings.
-As a temporary workaround we disable the might sleep warnings by
-setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend()
-and restoring it to SYSTEM_RUNNING afer sysdev_resume().
+As a workaround we disable the might sleep warnings by setting
+system_state to SYSTEM_SUSPEND before calling sysdev_suspend() and
+restoring it to SYSTEM_RUNNING after sysdev_resume(). There is no lock
+contention because hibernate / suspend to RAM is single-CPU at this
+point.
-Needs to be revisited.
+In the s2idle case, system_state is set to SYSTEM_SUSPEND before
+timekeeping_suspend(), which is invoked by the last CPU to freeze. On
+resume it is set back to SYSTEM_RUNNING after timekeeping_resume(),
+which is invoked by the first CPU to unfreeze. The other CPUs will
+block on tick_freeze_lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
+[bigeasy: cover s2idle]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/kernel.h | 1 +
- kernel/power/hibernate.c | 7 +++++++
- kernel/power/suspend.c | 4 ++++
- 3 files changed, 12 insertions(+)
+ include/linux/kernel.h | 1 +
+ kernel/power/hibernate.c | 7 +++++++
+ kernel/power/suspend.c | 4 ++++
+ kernel/time/tick-common.c | 2 ++
+ 4 files changed, 14 insertions(+)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -104,3 +112,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -490,6 +490,7 @@ void tick_freeze(void)
+ if (tick_freeze_depth == num_online_cpus()) {
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), true);
++ system_state = SYSTEM_SUSPEND;
+ timekeeping_suspend();
+ } else {
+ tick_suspend_local();
+@@ -513,6 +514,7 @@ void tick_unfreeze(void)
+
+ if (tick_freeze_depth == num_online_cpus()) {
+ timekeeping_resume();
++ system_state = SYSTEM_RUNNING;
+ trace_suspend_resume(TPS("timekeeping_freeze"),
+ smp_processor_id(), false);
+ } else {
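The mechanism behind the patch above is deliberately small: might-sleep
splats are suppressed while system_state signals suspend. A
much-simplified sketch of the idea, assuming the test sits in the
might-sleep debug path (this is not the literal kernel code):

#include <linux/kernel.h>

static bool might_sleep_splat_suppressed(void)
{
	/* Suspend/resume legitimately runs with interrupts disabled,
	 * so the debug warning is skipped in SYSTEM_SUSPEND state. */
	return system_state == SYSTEM_SUSPEND;
}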
diff --git a/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch b/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
index 9cdc4c982ecd..bf52cabe6692 100644
--- a/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
+++ b/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
-@@ -460,10 +460,10 @@ wb_congested_get_create(struct backing_d
+@@ -461,10 +461,10 @@ wb_congested_get_create(struct backing_d
if (new_congested) {
/* !found and storage for new one already allocated, insert */
congested = new_congested;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
spin_unlock_irqrestore(&cgwb_lock, flags);
-@@ -473,13 +473,13 @@ wb_congested_get_create(struct backing_d
+@@ -474,13 +474,13 @@ wb_congested_get_create(struct backing_d
if (!new_congested)
return NULL;
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&cgwb_lock, flags);
kfree(new_congested);
return congested;
-@@ -496,7 +496,7 @@ void wb_congested_put(struct bdi_writeba
+@@ -497,7 +497,7 @@ void wb_congested_put(struct bdi_writeba
unsigned long flags;
local_irq_save(flags);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
return;
}
-@@ -806,7 +806,7 @@ static int cgwb_bdi_init(struct backing_
+@@ -807,7 +807,7 @@ static int cgwb_bdi_init(struct backing_
if (!bdi->wb_congested)
return -ENOMEM;
diff --git a/patches/0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch b/patches/0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch
new file mode 100644
index 000000000000..9f34a6e1efe9
--- /dev/null
+++ b/patches/0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch
@@ -0,0 +1,57 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Fri, 4 May 2018 17:45:29 +0200
+Subject: [PATCH 1/3] spinlock: atomic_dec_and_lock: Add an irqsave variant
+
+There are in-tree users of atomic_dec_and_lock() which must acquire the
+spin lock with interrupts disabled. To work around the lack of an
+irqsave variant of atomic_dec_and_lock() they use local_irq_save() at
+the call site. This adds extra code and in some places creates
+unnecessarily long interrupt-disabled sections. These places also need
+extra treatment for PREEMPT_RT because the interrupt disabling is
+decoupled from the lock function.
+
+Implement the missing irqsave variant of the function.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/spinlock.h | 5 +++++
+ lib/dec_and_lock.c | 16 ++++++++++++++++
+ 2 files changed, 21 insertions(+)
+
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -409,6 +409,11 @@ extern int _atomic_dec_and_lock(atomic_t
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
++ unsigned long *flags);
++#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
++ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
++
+ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp);
+--- a/lib/dec_and_lock.c
++++ b/lib/dec_and_lock.c
+@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomi
+ }
+
+ EXPORT_SYMBOL(_atomic_dec_and_lock);
++
++int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
++ unsigned long *flags)
++{
++ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
++ if (atomic_add_unless(atomic, -1, 1))
++ return 0;
++
++ /* Otherwise do it the slow way */
++ spin_lock_irqsave(lock, *flags);
++ if (atomic_dec_and_test(atomic))
++ return 1;
++ spin_unlock_irqrestore(lock, *flags);
++ return 0;
++}
++EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
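To see what the new helper buys at a call site, here is a hypothetical
user (struct obj, obj_list_lock and the list handling are illustrative
only, not from the patch): the interrupt-disabled window now exists
only on the slow path where the count actually drops to zero.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_list_lock);

struct obj {
	struct list_head node;
	atomic_t refcnt;
};

static void obj_put(struct obj *obj)
{
	unsigned long flags;

	/*
	 * Old pattern: local_irq_save() around atomic_dec_and_lock(),
	 * disabling interrupts even on the common fast path. New
	 * pattern: interrupts are disabled only if the count drops to
	 * zero and obj_list_lock is actually taken.
	 */
	if (atomic_dec_and_lock_irqsave(&obj->refcnt, &obj_list_lock, flags)) {
		list_del(&obj->node);
		spin_unlock_irqrestore(&obj_list_lock, flags);
		kfree(obj);
	}
}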
diff --git a/patches/0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch b/patches/0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch
new file mode 100644
index 000000000000..bf8b3e2f68b1
--- /dev/null
+++ b/patches/0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch
@@ -0,0 +1,82 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 25 May 2018 09:57:42 +0200
+Subject: [PATCH 2/4] PM / wakeup: Make events_lock a RAW_SPINLOCK
+
+The `events_lock' is acquired during suspend while interrupts are
+disabled even on RT. The lock is taken only for a very brief moment.
+Make it a RAW lock which avoids "sleeping while atomic" warnings on RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/base/power/wakeup.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -57,7 +57,7 @@ static void split_counters(unsigned int
+ /* A preserved old value of the events counter. */
+ static unsigned int saved_count;
+
+-static DEFINE_SPINLOCK(events_lock);
++static DEFINE_RAW_SPINLOCK(events_lock);
+
+ static void pm_wakeup_timer_fn(struct timer_list *t);
+
+@@ -185,9 +185,9 @@ void wakeup_source_add(struct wakeup_sou
+ ws->active = false;
+ ws->last_time = ktime_get();
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+@@ -202,9 +202,9 @@ void wakeup_source_remove(struct wakeup_
+ if (WARN_ON(!ws))
+ return;
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ list_del_rcu(&ws->entry);
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ synchronize_srcu(&wakeup_srcu);
+ }
+ EXPORT_SYMBOL_GPL(wakeup_source_remove);
+@@ -843,7 +843,7 @@ bool pm_wakeup_pending(void)
+ unsigned long flags;
+ bool ret = false;
+
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ if (events_check_enabled) {
+ unsigned int cnt, inpr;
+
+@@ -851,7 +851,7 @@ bool pm_wakeup_pending(void)
+ ret = (cnt != saved_count || inpr > 0);
+ events_check_enabled = !ret;
+ }
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret) {
+ pr_info("PM: Wakeup pending, aborting suspend\n");
+@@ -940,13 +940,13 @@ bool pm_save_wakeup_count(unsigned int c
+ unsigned long flags;
+
+ events_check_enabled = false;
+- spin_lock_irqsave(&events_lock, flags);
++ raw_spin_lock_irqsave(&events_lock, flags);
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
+ saved_count = count;
+ events_check_enabled = true;
+ }
+- spin_unlock_irqrestore(&events_lock, flags);
++ raw_spin_unlock_irqrestore(&events_lock, flags);
+ return events_check_enabled;
+ }
+
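The conversion above follows a general RT rule: on PREEMPT_RT a plain
spinlock_t becomes a sleeping lock, so a lock that is taken while
interrupts are hard-disabled must be a raw_spinlock_t and its critical
sections must stay short. A minimal sketch of the resulting idiom
(demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned int demo_events;

static void demo_record_event(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() really spins even on RT, so keep
	 * the critical section brief and bounded. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_events++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}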
diff --git a/patches/0007-md-raid5-Use-irqsave-variant-of-refcount_dec_and_loc.patch b/patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch
index 9f669690c70f..e95405005c9c 100644
--- a/patches/0007-md-raid5-Use-irqsave-variant-of-refcount_dec_and_loc.patch
+++ b/patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch
@@ -1,13 +1,13 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Wed, 4 Apr 2018 11:43:58 +0200
-Subject: [PATCH] md: raid5: Use irqsave variant of refcount_dec_and_lock()
+Date: Fri, 4 May 2018 17:45:32 +0200
+Subject: [PATCH 2/3] drivers/md/raid5: Use irqsave variant of
+ atomic_dec_and_lock()
-The irqsave variant of refcount_dec_and_lock handles irqsave/restore when
+The irqsave variant of atomic_dec_and_lock handles irqsave/restore when
taking/releasing the spin lock. With this variant the call of
local_irq_save is no longer required.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/md/raid5.c | 5 ++---
@@ -21,8 +21,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slow_path:
- local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
-- if (refcount_dec_and_lock(&sh->count, &conf->device_lock)) {
-+ if (refcount_dec_and_lock_irqsave(&sh->count, &conf->device_lock, &flags)) {
+- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
++ if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
INIT_LIST_HEAD(&list);
hash = sh->hash_lock_index;
do_release_stripe(conf, sh, &list);
diff --git a/patches/0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch b/patches/0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch
new file mode 100644
index 000000000000..2e9a0ab4e4c2
--- /dev/null
+++ b/patches/0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch
@@ -0,0 +1,53 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 25 May 2018 10:05:13 +0200
+Subject: [PATCH 3/4] PM / s2idle: Make s2idle_wait_head swait based
+
+s2idle_wait_head is used during s2idle with interrupts disabled even on
+RT. There is no custom wake-up function, so swait can be used instead;
+it is also lighter weight than a wait_queue.
+Make s2idle_wait_head a swait_queue_head.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/power/suspend.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -27,6 +27,7 @@
+ #include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
++#include <linux/swait.h>
+ #include <linux/ftrace.h>
+ #include <trace/events/power.h>
+ #include <linux/compiler.h>
+@@ -57,7 +58,7 @@ EXPORT_SYMBOL_GPL(pm_suspend_global_flag
+
+ static const struct platform_suspend_ops *suspend_ops;
+ static const struct platform_s2idle_ops *s2idle_ops;
+-static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head);
++static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
+
+ enum s2idle_states __read_mostly s2idle_state;
+ static DEFINE_SPINLOCK(s2idle_lock);
+@@ -91,8 +92,8 @@ static void s2idle_enter(void)
+ /* Push all the CPUs into the idle loop. */
+ wake_up_all_idle_cpus();
+ /* Make the current CPU wait so it can enter the idle loop too. */
+- wait_event(s2idle_wait_head,
+- s2idle_state == S2IDLE_STATE_WAKE);
++ swait_event(s2idle_wait_head,
++ s2idle_state == S2IDLE_STATE_WAKE);
+
+ cpuidle_pause();
+ put_online_cpus();
+@@ -159,7 +160,7 @@ void s2idle_wake(void)
+ spin_lock_irqsave(&s2idle_lock, flags);
+ if (s2idle_state > S2IDLE_STATE_NONE) {
+ s2idle_state = S2IDLE_STATE_WAKE;
+- wake_up(&s2idle_wait_head);
++ swake_up(&s2idle_wait_head);
+ }
+ spin_unlock_irqrestore(&s2idle_lock, flags);
+ }
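For reference, the swait API used above mirrors the regular waitqueue
calls but with a simpler implementation that suits RT (no custom wake
functions, bounded wake-up work). A minimal wait/wake pairing sketch
under those assumptions (demo_* names are hypothetical; note that v4.16
spells the wake primitive swake_up(), later kernels renamed it
swake_up_one()):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wait);
static bool demo_done;

static void demo_waiter(void)
{
	/* Sleeps until demo_done is observed true. */
	swait_event(demo_wait, READ_ONCE(demo_done));
}

static void demo_finish(void)
{
	WRITE_ONCE(demo_done, true);
	swake_up(&demo_wait);
}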
diff --git a/patches/0008-md-raid5-Do-not-disable-irq-on-release_inactive_stri.patch b/patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch
index 50ea03768bf1..dff8bdff023b 100644
--- a/patches/0008-md-raid5-Do-not-disable-irq-on-release_inactive_stri.patch
+++ b/patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch
@@ -1,6 +1,6 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Wed, 4 Apr 2018 11:43:59 +0200
-Subject: [PATCH] md: raid5: Do not disable irq on
+Date: Fri, 4 May 2018 17:45:33 +0200
+Subject: [PATCH 3/3] drivers/md/raid5: Do not disable irq on
release_inactive_stripe_list() call
There is no need to invoke release_inactive_stripe_list() with interrupts
@@ -10,7 +10,6 @@ disabled. All call sites, except raid5_release_stripe(), unlock
Make it consistent.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/md/raid5.c | 3 +--
diff --git a/patches/0003-md-raid5-use-refcount_t-for-reference-counting-inste.patch b/patches/0003-md-raid5-use-refcount_t-for-reference-counting-inste.patch
deleted file mode 100644
index 7d65ea8648e7..000000000000
--- a/patches/0003-md-raid5-use-refcount_t-for-reference-counting-inste.patch
+++ /dev/null
@@ -1,364 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 7 May 2018 17:42:52 +0200
-Subject: [PATCH] md: raid5: use refcount_t for reference counting instead
- atomic_t
-
-refcount_t type and corresponding API should be used instead of atomic_t when
-the variable is used as a reference counter. This allows to avoid accidental
-refcounter overflows that might lead to use-after-free situations.
-
-Most changes are 1:1 replacements except for
- BUG_ON(atomic_inc_return(&sh->count) != 1);
-
-which has been turned into
- refcount_inc(&sh->count);
- BUG_ON(refcount_read(&sh->count) != 1);
-
-Suggested-by: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/md/raid5-cache.c | 8 ++---
- drivers/md/raid5-ppl.c | 2 -
- drivers/md/raid5.c | 67 +++++++++++++++++++++++------------------------
- drivers/md/raid5.h | 4 +-
- 4 files changed, 41 insertions(+), 40 deletions(-)
-
---- a/drivers/md/raid5-cache.c
-+++ b/drivers/md/raid5-cache.c
-@@ -1049,7 +1049,7 @@ int r5l_write_stripe(struct r5l_log *log
- * don't delay.
- */
- clear_bit(STRIPE_DELAYED, &sh->state);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- mutex_lock(&log->io_mutex);
- /* meta + data */
-@@ -1388,7 +1388,7 @@ static void r5c_flush_stripe(struct r5co
- lockdep_assert_held(&conf->device_lock);
-
- list_del_init(&sh->lru);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- set_bit(STRIPE_HANDLE, &sh->state);
- atomic_inc(&conf->active_stripes);
-@@ -1491,7 +1491,7 @@ static void r5c_do_reclaim(struct r5conf
- */
- if (!list_empty(&sh->lru) &&
- !test_bit(STRIPE_HANDLE, &sh->state) &&
-- atomic_read(&sh->count) == 0) {
-+ refcount_read(&sh->count) == 0) {
- r5c_flush_stripe(conf, sh);
- if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
- break;
-@@ -2912,7 +2912,7 @@ int r5c_cache_data(struct r5l_log *log,
- * don't delay.
- */
- clear_bit(STRIPE_DELAYED, &sh->state);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- mutex_lock(&log->io_mutex);
- /* meta + data */
---- a/drivers/md/raid5-ppl.c
-+++ b/drivers/md/raid5-ppl.c
-@@ -388,7 +388,7 @@ int ppl_write_stripe(struct r5conf *conf
-
- set_bit(STRIPE_LOG_TRAPPED, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- if (ppl_log_stripe(log, sh)) {
- spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -306,7 +306,7 @@ static void do_release_stripe(struct r5c
- static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
- struct list_head *temp_inactive_list)
- {
-- if (atomic_dec_and_test(&sh->count))
-+ if (refcount_dec_and_test(&sh->count))
- do_release_stripe(conf, sh, temp_inactive_list);
- }
-
-@@ -398,7 +398,7 @@ void raid5_release_stripe(struct stripe_
-
- /* Avoid release_list until the last reference.
- */
-- if (atomic_add_unless(&sh->count, -1, 1))
-+ if (refcount_dec_not_one(&sh->count))
- return;
-
- if (unlikely(!conf->mddev->thread) ||
-@@ -411,7 +411,7 @@ void raid5_release_stripe(struct stripe_
- slow_path:
- local_irq_save(flags);
- /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
-- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
-+ if (refcount_dec_and_lock(&sh->count, &conf->device_lock)) {
- INIT_LIST_HEAD(&list);
- hash = sh->hash_lock_index;
- do_release_stripe(conf, sh, &list);
-@@ -501,7 +501,7 @@ static void init_stripe(struct stripe_he
- struct r5conf *conf = sh->raid_conf;
- int i, seq;
-
-- BUG_ON(atomic_read(&sh->count) != 0);
-+ BUG_ON(refcount_read(&sh->count) != 0);
- BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
- BUG_ON(stripe_operations_active(sh));
- BUG_ON(sh->batch_head);
-@@ -678,11 +678,11 @@ raid5_get_active_stripe(struct r5conf *c
- &conf->cache_state);
- } else {
- init_stripe(sh, sector, previous);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- }
-- } else if (!atomic_inc_not_zero(&sh->count)) {
-+ } else if (!refcount_inc_not_zero(&sh->count)) {
- spin_lock(&conf->device_lock);
-- if (!atomic_read(&sh->count)) {
-+ if (!refcount_read(&sh->count)) {
- if (!test_bit(STRIPE_HANDLE, &sh->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru) &&
-@@ -698,7 +698,7 @@ raid5_get_active_stripe(struct r5conf *c
- sh->group = NULL;
- }
- }
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- spin_unlock(&conf->device_lock);
- }
- } while (sh == NULL);
-@@ -760,9 +760,9 @@ static void stripe_add_to_batch_list(str
- hash = stripe_hash_locks_hash(head_sector);
- spin_lock_irq(conf->hash_locks + hash);
- head = __find_stripe(conf, head_sector, conf->generation);
-- if (head && !atomic_inc_not_zero(&head->count)) {
-+ if (head && !refcount_inc_not_zero(&head->count)) {
- spin_lock(&conf->device_lock);
-- if (!atomic_read(&head->count)) {
-+ if (!refcount_read(&head->count)) {
- if (!test_bit(STRIPE_HANDLE, &head->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&head->lru) &&
-@@ -778,7 +778,7 @@ static void stripe_add_to_batch_list(str
- head->group = NULL;
- }
- }
-- atomic_inc(&head->count);
-+ refcount_inc(&head->count);
- spin_unlock(&conf->device_lock);
- }
- spin_unlock_irq(conf->hash_locks + hash);
-@@ -847,7 +847,7 @@ static void stripe_add_to_batch_list(str
- sh->batch_head->bm_seq = seq;
- }
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- unlock_out:
- unlock_two_stripes(head, sh);
- out:
-@@ -1110,9 +1110,9 @@ static void ops_run_io(struct stripe_hea
- pr_debug("%s: for %llu schedule op %d on disc %d\n",
- __func__, (unsigned long long)sh->sector,
- bi->bi_opf, i);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- if (sh != head_sh)
-- atomic_inc(&head_sh->count);
-+ refcount_inc(&head_sh->count);
- if (use_new_offset(conf, sh))
- bi->bi_iter.bi_sector = (sh->sector
- + rdev->new_data_offset);
-@@ -1174,9 +1174,9 @@ static void ops_run_io(struct stripe_hea
- "replacement disc %d\n",
- __func__, (unsigned long long)sh->sector,
- rbi->bi_opf, i);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- if (sh != head_sh)
-- atomic_inc(&head_sh->count);
-+ refcount_inc(&head_sh->count);
- if (use_new_offset(conf, sh))
- rbi->bi_iter.bi_sector = (sh->sector
- + rrdev->new_data_offset);
-@@ -1354,7 +1354,7 @@ static void ops_run_biofill(struct strip
- }
- }
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
- async_trigger_callback(&submit);
- }
-@@ -1432,7 +1432,7 @@ ops_run_compute5(struct stripe_head *sh,
- if (i != target)
- xor_srcs[count++] = sh->dev[i].page;
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
- ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
-@@ -1521,7 +1521,7 @@ ops_run_compute6_1(struct stripe_head *s
- BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
- dest = tgt->page;
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- if (target == qd_idx) {
- count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
-@@ -1596,7 +1596,7 @@ ops_run_compute6_2(struct stripe_head *s
- pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
- __func__, (unsigned long long)sh->sector, faila, failb);
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
-
- if (failb == syndrome_disks+1) {
- /* Q disk is one of the missing disks */
-@@ -1867,7 +1867,7 @@ ops_run_reconstruct5(struct stripe_head
- break;
- }
- if (i >= sh->disks) {
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- set_bit(R5_Discard, &sh->dev[pd_idx].flags);
- ops_complete_reconstruct(sh);
- return;
-@@ -1908,7 +1908,7 @@ ops_run_reconstruct5(struct stripe_head
- flags = ASYNC_TX_ACK |
- (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
-
-- atomic_inc(&head_sh->count);
-+ refcount_inc(&head_sh->count);
- init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
- to_addr_conv(sh, percpu, j));
- } else {
-@@ -1950,7 +1950,7 @@ ops_run_reconstruct6(struct stripe_head
- break;
- }
- if (i >= sh->disks) {
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
- set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
- ops_complete_reconstruct(sh);
-@@ -1974,7 +1974,7 @@ ops_run_reconstruct6(struct stripe_head
- struct stripe_head, batch_list) == head_sh;
-
- if (last_stripe) {
-- atomic_inc(&head_sh->count);
-+ refcount_inc(&head_sh->count);
- init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
- head_sh, to_addr_conv(sh, percpu, j));
- } else
-@@ -2031,7 +2031,7 @@ static void ops_run_check_p(struct strip
- tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- &sh->ops.zero_sum_result, &submit);
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
- tx = async_trigger_callback(&submit);
- }
-@@ -2050,7 +2050,7 @@ static void ops_run_check_pq(struct stri
- if (!checkp)
- srcs[count] = NULL;
-
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
- sh, to_addr_conv(sh, percpu, 0));
- async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
-@@ -2150,7 +2150,7 @@ static struct stripe_head *alloc_stripe(
- INIT_LIST_HEAD(&sh->lru);
- INIT_LIST_HEAD(&sh->r5c);
- INIT_LIST_HEAD(&sh->log_list);
-- atomic_set(&sh->count, 1);
-+ refcount_set(&sh->count, 1);
- sh->raid_conf = conf;
- sh->log_start = MaxSector;
- for (i = 0; i < disks; i++) {
-@@ -2451,7 +2451,7 @@ static int drop_one_stripe(struct r5conf
- spin_unlock_irq(conf->hash_locks + hash);
- if (!sh)
- return 0;
-- BUG_ON(atomic_read(&sh->count));
-+ BUG_ON(refcount_read(&sh->count));
- shrink_buffers(sh);
- free_stripe(conf->slab_cache, sh);
- atomic_dec(&conf->active_stripes);
-@@ -2483,7 +2483,7 @@ static void raid5_end_read_request(struc
- break;
-
- pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
-- (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-+ (unsigned long long)sh->sector, i, refcount_read(&sh->count),
- bi->bi_status);
- if (i == disks) {
- bio_reset(bi);
-@@ -2620,7 +2620,7 @@ static void raid5_end_write_request(stru
- }
- }
- pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
-- (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-+ (unsigned long long)sh->sector, i, refcount_read(&sh->count),
- bi->bi_status);
- if (i == disks) {
- bio_reset(bi);
-@@ -4687,7 +4687,7 @@ static void handle_stripe(struct stripe_
- pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
- "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
- (unsigned long long)sh->sector, sh->state,
-- atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
-+ refcount_read(&sh->count), sh->pd_idx, sh->qd_idx,
- sh->check_state, sh->reconstruct_state);
-
- analyse_stripe(sh, &s);
-@@ -5062,7 +5062,7 @@ static void activate_bit_delay(struct r5
- struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
- int hash;
- list_del_init(&sh->lru);
-- atomic_inc(&sh->count);
-+ refcount_inc(&sh->count);
- hash = sh->hash_lock_index;
- __release_stripe(conf, sh, &temp_inactive_list[hash]);
- }
-@@ -5387,7 +5387,8 @@ static struct stripe_head *__get_priorit
- sh->group = NULL;
- }
- list_del_init(&sh->lru);
-- BUG_ON(atomic_inc_return(&sh->count) != 1);
-+ refcount_inc(&sh->count);
-+ BUG_ON(refcount_read(&sh->count) != 1);
- return sh;
- }
-
---- a/drivers/md/raid5.h
-+++ b/drivers/md/raid5.h
-@@ -4,7 +4,7 @@
-
- #include <linux/raid/xor.h>
- #include <linux/dmaengine.h>
--
-+#include <linux/refcount.h>
- /*
- *
- * Each stripe contains one buffer per device. Each buffer can be in
-@@ -208,7 +208,7 @@ struct stripe_head {
- short ddf_layout;/* use DDF ordering to calculate Q */
- short hash_lock_index;
- unsigned long state; /* state flags */
-- atomic_t count; /* nr of active thread/requests */
-+ refcount_t count; /* nr of active thread/requests */
- int bm_seq; /* sequence number for bitmap flushes */
- int disks; /* disks in stripe */
- int overwrite_disks; /* total overwrite disks in stripe,
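As the announcement explains, this conversion was dropped because
raid5's stripe counting does not fit the refcount_t contract: inactive
stripe heads sit at count zero and are legitimately revived under
conf->device_lock, while refcount_t treats an increment from zero as a
use-after-free bug. An illustrative sketch of that semantic difference
(not raid5 code):

#include <linux/atomic.h>
#include <linux/refcount.h>

static atomic_t plain_count = ATOMIC_INIT(0);
static refcount_t strict_count = REFCOUNT_INIT(0);

static void demo_zero_to_one(void)
{
	/* atomic_t is a bare counter: 0 -> 1 is perfectly legal. */
	atomic_inc(&plain_count);

	/* refcount_t assumes 0 means "object is gone": incrementing
	 * from zero is flagged as a bug and triggers a WARN. */
	refcount_inc(&strict_count);
}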
diff --git a/patches/0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch b/patches/0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch
new file mode 100644
index 000000000000..d2ff68e8f234
--- /dev/null
+++ b/patches/0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch
@@ -0,0 +1,68 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 25 May 2018 10:16:05 +0200
+Subject: [PATCH 4/4] PM / wakeup: Make s2idle_lock a RAW_SPINLOCK
+
+The `s2idle_lock' is acquired during suspend while interrupts are
+disabled even on RT. The lock is acquired for short sections only.
+Make it a RAW lock which avoids "sleeping while atomic" warnings on RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/power/suspend.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -61,7 +61,7 @@ static const struct platform_s2idle_ops
+ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
+
+ enum s2idle_states __read_mostly s2idle_state;
+-static DEFINE_SPINLOCK(s2idle_lock);
++static DEFINE_RAW_SPINLOCK(s2idle_lock);
+
+ void s2idle_set_ops(const struct platform_s2idle_ops *ops)
+ {
+@@ -79,12 +79,12 @@ static void s2idle_enter(void)
+ {
+ trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
+
+- spin_lock_irq(&s2idle_lock);
++ raw_spin_lock_irq(&s2idle_lock);
+ if (pm_wakeup_pending())
+ goto out;
+
+ s2idle_state = S2IDLE_STATE_ENTER;
+- spin_unlock_irq(&s2idle_lock);
++ raw_spin_unlock_irq(&s2idle_lock);
+
+ get_online_cpus();
+ cpuidle_resume();
+@@ -98,11 +98,11 @@ static void s2idle_enter(void)
+ cpuidle_pause();
+ put_online_cpus();
+
+- spin_lock_irq(&s2idle_lock);
++ raw_spin_lock_irq(&s2idle_lock);
+
+ out:
+ s2idle_state = S2IDLE_STATE_NONE;
+- spin_unlock_irq(&s2idle_lock);
++ raw_spin_unlock_irq(&s2idle_lock);
+
+ trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
+ }
+@@ -157,12 +157,12 @@ void s2idle_wake(void)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&s2idle_lock, flags);
++ raw_spin_lock_irqsave(&s2idle_lock, flags);
+ if (s2idle_state > S2IDLE_STATE_NONE) {
+ s2idle_state = S2IDLE_STATE_WAKE;
+ swake_up(&s2idle_wait_head);
+ }
+- spin_unlock_irqrestore(&s2idle_lock, flags);
++ raw_spin_unlock_irqrestore(&s2idle_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(s2idle_wake);
+
diff --git a/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch b/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
index 3ccfd85e9b28..ab6944e9a841 100644
--- a/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
+++ b/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
-@@ -495,11 +495,8 @@ void wb_congested_put(struct bdi_writeba
+@@ -496,11 +496,8 @@ void wb_congested_put(struct bdi_writeba
{
unsigned long flags;
diff --git a/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
index cd455710b2c6..0eefee6fb27e 100644
--- a/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
+++ b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !mg_counter);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7278,4 +7278,49 @@ void migrate_enable(void)
+@@ -7283,4 +7283,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index ab44783c3455..4e6299c81d7d 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -79,7 +79,7 @@ Subject: kernel/sched/core: add migrate_disable()
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1033,7 +1033,15 @@ void set_cpus_allowed_common(struct task
+@@ -1034,7 +1034,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable()
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1062,6 +1070,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1063,6 +1071,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable()
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1120,9 +1142,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1121,9 +1143,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7027,3 +7056,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7032,3 +7061,100 @@ const u32 sched_prio_to_wmult[40] = {
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 6f368e476e8c..88f8382d7e5e 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1608,7 +1608,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1612,7 +1612,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return error;
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -593,6 +593,8 @@ static int enter_state(suspend_state_t s
+@@ -594,6 +594,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -607,6 +609,7 @@ int pm_suspend(suspend_state_t state)
+@@ -608,6 +610,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@@ -175,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
-@@ -616,6 +619,7 @@ int pm_suspend(suspend_state_t state)
+@@ -617,6 +620,7 @@ int pm_suspend(suspend_state_t state)
suspend_stats.success++;
}
pr_info("suspend exit\n");
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7075,7 +7075,10 @@ void migrate_disable(void)
+@@ -7080,7 +7080,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7105,7 +7108,10 @@ void migrate_enable(void)
+@@ -7110,7 +7113,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index c4cf9972a7d1..7b09de7500b1 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1625,12 +1625,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1633,12 +1633,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5020,6 +5020,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5021,6 +5021,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -5033,6 +5034,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5034,6 +5035,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 6d0b7f19a571..c1c36e438376 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -106,6 +106,8 @@ extern void cpu_hotplug_disable(void);
+@@ -108,6 +108,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* CONFIG_HOTPLUG_CPU */
-@@ -116,6 +118,9 @@ static inline void cpus_read_unlock(void
+@@ -118,6 +120,9 @@ static inline void cpus_read_unlock(void
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7173,6 +7173,7 @@ void migrate_disable(void)
+@@ -7178,6 +7178,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7238,12 +7239,15 @@ void migrate_enable(void)
+@@ -7243,12 +7244,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index 706cc554e1e9..7efceebcf889 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -343,7 +343,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -344,7 +344,7 @@ static void init_rq_hrtick(struct rq *rq
rq->hrtick_csd.info = rq;
#endif
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 7ebb20ad8758..d85ac6827f62 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -274,7 +274,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!find_hca(cpu, &unit) && unit >= 0)
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
-@@ -361,9 +361,9 @@ static inline void task_context_switch_c
+@@ -386,9 +386,9 @@ static inline void task_context_switch_c
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -344,7 +344,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* parent)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -970,7 +970,7 @@ static struct rq *__migrate_task(struct
+@@ -971,7 +971,7 @@ static struct rq *__migrate_task(struct
}
/* Affinity changed (again). */
@@ -353,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rq;
update_rq_clock(rq);
-@@ -998,7 +998,7 @@ static int migration_cpu_stop(void *data
+@@ -999,7 +999,7 @@ static int migration_cpu_stop(void *data
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
@@ -362,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1029,7 +1029,7 @@ static int migration_cpu_stop(void *data
+@@ -1030,7 +1030,7 @@ static int migration_cpu_stop(void *data
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -371,7 +371,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-@@ -1099,7 +1099,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1100,7 +1100,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
@@ -380,7 +380,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1260,10 +1260,10 @@ static int migrate_swap_stop(void *data)
+@@ -1261,10 +1261,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1304,10 +1304,10 @@ int migrate_swap(struct task_struct *cur
+@@ -1305,10 +1305,10 @@ int migrate_swap(struct task_struct *cur
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -406,7 +406,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1451,7 +1451,7 @@ void kick_process(struct task_struct *p)
+@@ -1452,7 +1452,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1491,14 +1491,14 @@ static int select_fallback_rq(int cpu, s
+@@ -1492,14 +1492,14 @@ static int select_fallback_rq(int cpu, s
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -432,7 +432,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
continue;
if (!cpu_online(dest_cpu))
-@@ -1543,7 +1543,7 @@ static int select_fallback_rq(int cpu, s
+@@ -1544,7 +1544,7 @@ static int select_fallback_rq(int cpu, s
}
/*
@@ -441,7 +441,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1553,11 +1553,11 @@ int select_task_rq(struct task_struct *p
+@@ -1554,11 +1554,11 @@ int select_task_rq(struct task_struct *p
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -455,7 +455,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -1565,7 +1565,7 @@ int select_task_rq(struct task_struct *p
+@@ -1566,7 +1566,7 @@ int select_task_rq(struct task_struct *p
* [ this allows ->select_task() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
@@ -464,7 +464,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
-@@ -2455,7 +2455,7 @@ void wake_up_new_task(struct task_struct
+@@ -2456,7 +2456,7 @@ void wake_up_new_task(struct task_struct
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -473,7 +473,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4239,7 +4239,7 @@ static int __sched_setscheduler(struct t
+@@ -4240,7 +4240,7 @@ static int __sched_setscheduler(struct t
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -482,7 +482,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4838,7 +4838,7 @@ long sched_getaffinity(pid_t pid, struct
+@@ -4839,7 +4839,7 @@ long sched_getaffinity(pid_t pid, struct
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -491,7 +491,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5428,7 +5428,7 @@ int task_can_attach(struct task_struct *
+@@ -5429,7 +5429,7 @@ int task_can_attach(struct task_struct *
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -500,7 +500,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5455,7 +5455,7 @@ int migrate_task_to(struct task_struct *
+@@ -5456,7 +5456,7 @@ int migrate_task_to(struct task_struct *
if (curr_cpu == target_cpu)
return 0;
@@ -509,7 +509,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5592,7 +5592,7 @@ static void migrate_tasks(struct rq *dea
+@@ -5593,7 +5593,7 @@ static void migrate_tasks(struct rq *dea
put_prev_task(rq, next);
/*
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 98f430841c9d..27f8b35f0c63 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2778,15 +2778,6 @@ static struct rq *finish_task_switch(str
+@@ -2779,15 +2779,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 637bea469aca..b205dd54ca38 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -33,7 +33,7 @@ Jason.
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/ktime.h>
-@@ -3219,6 +3220,8 @@ void serial8250_console_write(struct uar
+@@ -3220,6 +3221,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 03a80b8b0e80..72cdd2b3c760 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/lockdep-Add-a-assert_in_softirq.patch b/patches/lockdep-Add-a-assert_in_softirq.patch
deleted file mode 100644
index 53c577bfdb3a..000000000000
--- a/patches/lockdep-Add-a-assert_in_softirq.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Tue, 10 Apr 2018 11:37:11 +0200
-Subject: [PATCH] lockdep: Add a assert_in_softirq()
-
-Instead of directly warn on wrong context, check if softirq context is
-set. This check could be a nop on RT.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/lockdep.h | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/include/linux/lockdep.h
-+++ b/include/linux/lockdep.h
-@@ -608,11 +608,17 @@ do { \
- "IRQs not disabled as expected\n"); \
- } while (0)
-
-+#define lockdep_assert_in_softirq() do { \
-+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
-+ !current->softirq_context, \
-+ "Not in softirq context as expected\n"); \
-+ } while (0)
- #else
- # define might_lock(lock) do { } while (0)
- # define might_lock_read(lock) do { } while (0)
- # define lockdep_assert_irqs_enabled() do { } while (0)
- # define lockdep_assert_irqs_disabled() do { } while (0)
-+# define lockdep_assert_in_softirq() do { } while (0)
- #endif
-
- #ifdef CONFIG_LOCKDEP
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 4aa6968d658f..99adf2fd0616 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -7,9 +7,8 @@ teach lockdep that we don't really do softirqs on -RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/irqflags.h | 23 +++++++++++++++--------
- include/linux/lockdep.h | 7 ++++++-
kernel/locking/lockdep.c | 2 ++
- 3 files changed, 23 insertions(+), 9 deletions(-)
+ 2 files changed, 17 insertions(+), 8 deletions(-)
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -50,26 +49,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
defined(CONFIG_PREEMPT_TRACER)
---- a/include/linux/lockdep.h
-+++ b/include/linux/lockdep.h
-@@ -608,11 +608,16 @@ do { \
- "IRQs not disabled as expected\n"); \
- } while (0)
-
--#define lockdep_assert_in_softirq() do { \
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define lockdep_assert_in_softirq() do { } while (0)
-+#else
-+# define lockdep_assert_in_softirq() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- !current->softirq_context, \
- "Not in softirq context as expected\n"); \
- } while (0)
-+#endif
-+
- #else
- # define might_lock(lock) do { } while (0)
- # define might_lock_read(lock) do { } while (0)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3843,6 +3843,7 @@ static void check_flags(unsigned long fl
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index d3f741037081..df0c39443a98 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6789,6 +6791,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6788,6 +6790,7 @@ static int raid456_cpu_up_prepare(unsign
__func__, cpu);
return -ENOMEM;
}
@@ -49,7 +49,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
return 0;
}
-@@ -6799,7 +6802,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6798,7 +6801,6 @@ static int raid5_alloc_percpu(struct r5c
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 21f0a51b94bb..b87b62475d58 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4540,12 +4543,12 @@ static int mem_cgroup_move_account(struc
+@@ -4543,12 +4546,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5488,10 +5491,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5491,10 +5494,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5560,7 +5563,7 @@ static void uncharge_batch(const struct
+@@ -5563,7 +5566,7 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -5568,7 +5571,7 @@ static void uncharge_batch(const struct
+@@ -5571,7 +5574,7 @@ static void uncharge_batch(const struct
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -5731,10 +5734,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5734,10 +5737,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -5926,6 +5929,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5929,6 +5932,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5971,13 +5975,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5974,13 +5978,17 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/net-3com-3c59x-Move-boomerang-vortex-conditional-int.patch b/patches/net-3com-3c59x-Move-boomerang-vortex-conditional-int.patch
index 1d7e27cf9004..09d5fe769e3a 100644
--- a/patches/net-3com-3c59x-Move-boomerang-vortex-conditional-int.patch
+++ b/patches/net-3com-3c59x-Move-boomerang-vortex-conditional-int.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
}
#endif
-@@ -1729,8 +1729,7 @@ vortex_open(struct net_device *dev)
+@@ -1728,8 +1728,7 @@ vortex_open(struct net_device *dev)
dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
-@@ -1911,10 +1910,7 @@ static void vortex_tx_timeout(struct net
+@@ -1910,10 +1909,7 @@ static void vortex_tx_timeout(struct net
*/
unsigned long flags;
local_irq_save(flags);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
}
}
-@@ -2267,9 +2263,8 @@ boomerang_start_xmit(struct sk_buff *skb
+@@ -2266,9 +2262,8 @@ boomerang_start_xmit(struct sk_buff *skb
*/
static irqreturn_t
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
-@@ -2386,9 +2381,8 @@ vortex_interrupt(int irq, void *dev_id)
+@@ -2385,9 +2380,8 @@ vortex_interrupt(int irq, void *dev_id)
*/
static irqreturn_t
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
-@@ -2526,6 +2520,18 @@ boomerang_interrupt(int irq, void *dev_i
+@@ -2525,6 +2519,18 @@ boomerang_interrupt(int irq, void *dev_i
return IRQ_RETVAL(handled);
}
diff --git a/patches/net-3com-3c59x-Pull-locking-out-of-ISR.patch b/patches/net-3com-3c59x-Pull-locking-out-of-ISR.patch
index ca7d066f0491..ac59d2e1fec7 100644
--- a/patches/net-3com-3c59x-Pull-locking-out-of-ISR.patch
+++ b/patches/net-3com-3c59x-Pull-locking-out-of-ISR.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
-@@ -2273,7 +2273,6 @@ static irqreturn_t
+@@ -2272,7 +2272,6 @@ static irqreturn_t
unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
status = ioread16(ioaddr + EL3_STATUS);
-@@ -2371,7 +2370,6 @@ static irqreturn_t
+@@ -2370,7 +2369,6 @@ static irqreturn_t
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return IRQ_RETVAL(handled);
}
-@@ -2392,12 +2390,6 @@ static irqreturn_t
+@@ -2391,12 +2389,6 @@ static irqreturn_t
ioaddr = vp->ioaddr;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
-@@ -2516,7 +2508,6 @@ static irqreturn_t
+@@ -2515,7 +2507,6 @@ static irqreturn_t
dev->name, status);
handler_exit:
vp->handling_irq = 0;
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return IRQ_RETVAL(handled);
}
-@@ -2525,11 +2516,18 @@ vortex_boomerang_interrupt(int irq, void
+@@ -2524,11 +2515,18 @@ vortex_boomerang_interrupt(int irq, void
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
diff --git a/patches/net-3com-3c59x-irq-save-variant-of-ISR.patch b/patches/net-3com-3c59x-irq-save-variant-of-ISR.patch
index f8d37c5e89c5..376eee4bea0b 100644
--- a/patches/net-3com-3c59x-irq-save-variant-of-ISR.patch
+++ b/patches/net-3com-3c59x-irq-save-variant-of-ISR.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -1904,15 +1901,7 @@ static void vortex_tx_timeout(struct net
+@@ -1903,15 +1900,7 @@ static void vortex_tx_timeout(struct net
pr_err("%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (vortex_debug > 0)
-@@ -2516,16 +2505,17 @@ vortex_boomerang_interrupt(int irq, void
+@@ -2515,16 +2504,17 @@ vortex_boomerang_interrupt(int irq, void
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 0ccb76447950..6ca9dafba59e 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5407,6 +5407,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -5404,6 +5404,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -5418,6 +5419,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -5415,6 +5416,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index a1bb96f44d6b..da0d599f6add 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -481,6 +481,15 @@ static inline void write_seqlock(seqlock
+@@ -482,6 +482,15 @@ static inline void write_seqlock(seqlock
__raw_write_seqcount_begin(&sl->seqcount);
}
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index f1a6a379e2d8..1d57360b807e 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8708,7 +8708,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8705,7 +8705,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-mac808211-rc-warn_on.patch b/patches/net-mac808211-rc-warn_on.patch
deleted file mode 100644
index 7590fa5c5552..000000000000
--- a/patches/net-mac808211-rc-warn_on.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Tue, 10 Apr 2018 11:37:12 +0200
-Subject: [PATCH] net: mac808211: mac802154: use lockdep_assert_in_softirq() instead own warning
-
-The warning in ieee802154_rx() and ieee80211_rx_napi() is there to ensure
-the softirq context for the subsequent netif_receive_skb() call. The check
-could be moved into the netif_receive_skb() function to prevent all calling
-functions implement the checks on their own. Use the lockdep variant for
-softirq context check. While at it, add a lockdep based check for irq
-enabled as mentioned in the comment above netif_receive_skb().
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/core/dev.c | 3 +++
- net/mac80211/rx.c | 2 --
- net/mac802154/rx.c | 2 --
- 3 files changed, 3 insertions(+), 4 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4720,6 +4720,9 @@ static int netif_receive_skb_internal(st
- */
- int netif_receive_skb(struct sk_buff *skb)
- {
-+ lockdep_assert_irqs_enabled();
-+ lockdep_assert_in_softirq();
-+
- trace_netif_receive_skb_entry(skb);
-
- return netif_receive_skb_internal(skb);
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -4245,8 +4245,6 @@ void ieee80211_rx_napi(struct ieee80211_
- struct ieee80211_supported_band *sband;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
-- WARN_ON_ONCE(softirq_count() == 0);
--
- if (WARN_ON(status->band >= NUM_NL80211_BANDS))
- goto drop;
-
---- a/net/mac802154/rx.c
-+++ b/net/mac802154/rx.c
-@@ -258,8 +258,6 @@ void ieee802154_rx(struct ieee802154_loc
- {
- u16 crc;
-
-- WARN_ON_ONCE(softirq_count() == 0);
--
- if (local->suspended)
- goto drop;
-
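Dropping this patch leaves the upstream WARN_ON_ONCE(softirq_count() == 0) checks in ieee80211_rx_napi() and ieee802154_rx() in place; as the sched-let-softirq_count patch header further down notes, such checks now hold on -RT too, so there is nothing left to centralise in netif_receive_skb(). A minimal sketch of that check pattern follows; assert_rx_in_bh() is an illustrative name, not a kernel helper:

  /*
   * Illustrative sketch only -- assert_rx_in_bh() is a made-up name.
   * This is the kind of context check the dropped patch relocated:
   * warn once if the RX path runs outside BH/softirq context.  With
   * softirq_count() reporting the BH-disable depth on -RT, the check
   * holds there as well, so the relocation is no longer needed.
   */
  #include <linux/bug.h>      /* WARN_ON_ONCE() */
  #include <linux/preempt.h>  /* softirq_count() */

  static inline void assert_rx_in_bh(void)
  {
          WARN_ON_ONCE(softirq_count() == 0);
  }
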
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index d186fcb8733d..4f808cc86e29 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5812,7 +5812,7 @@ static __latent_entropy void net_rx_acti
+@@ -5809,7 +5809,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch b/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch
index 255cddd73846..c2ed1137b131 100644
--- a/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch
+++ b/patches/net-use-task_struct-instead-of-CPU-number-as-the-que.patch
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
goto recursion_alert;
-@@ -7762,7 +7766,7 @@ static void netdev_init_one_queue(struct
+@@ -7759,7 +7763,7 @@ static void netdev_init_one_queue(struct
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 52a6b7883148..124128c89054 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1628,6 +1628,44 @@ static inline int test_tsk_need_resched(
+@@ -1636,6 +1636,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -520,6 +520,48 @@ void resched_curr(struct rq *rq)
+@@ -521,6 +521,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2455,6 +2497,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2456,6 +2498,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3437,6 +3482,7 @@ static void __sched notrace __schedule(b
+@@ -3438,6 +3483,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3626,6 +3672,30 @@ static void __sched notrace preempt_sche
+@@ -3627,6 +3673,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3640,7 +3710,8 @@ asmlinkage __visible void __sched notrac
+@@ -3641,7 +3711,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3667,6 +3738,9 @@ asmlinkage __visible void __sched notrac
+@@ -3668,6 +3739,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5430,7 +5504,9 @@ void init_idle(struct task_struct *idle,
+@@ -5431,7 +5505,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7146,6 +7222,7 @@ void migrate_disable(void)
+@@ -7151,6 +7227,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7213,6 +7290,7 @@ void migrate_enable(void)
+@@ -7218,6 +7295,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7221,6 +7299,7 @@ void migrate_enable(void)
+@@ -7226,6 +7304,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 8db7adcc39d8..7f6978e62488 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1612,6 +1608,51 @@ static inline int test_tsk_need_resched(
+@@ -1620,6 +1616,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1369,6 +1369,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1370,6 +1370,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1413,7 +1425,7 @@ unsigned long wait_task_inactive(struct
+@@ -1414,7 +1426,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1428,7 +1440,8 @@ unsigned long wait_task_inactive(struct
+@@ -1429,7 +1441,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/radix-tree-use-local-locks.patch b/patches/radix-tree-use-local-locks.patch
index 27603d9df921..562982d81db4 100644
--- a/patches/radix-tree-use-local-locks.patch
+++ b/patches/radix-tree-use-local-locks.patch
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
-@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
+@@ -2100,10 +2108,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
-@@ -2122,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
+@@ -2120,7 +2134,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
* to return to the ida_pre_get() step.
*/
if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index 84b956f12c4f..cfd8647de304 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7109,6 +7109,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7114,6 +7114,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7132,10 +7173,9 @@ void migrate_disable(void)
+@@ -7137,10 +7178,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7167,9 +7207,8 @@ void migrate_enable(void)
+@@ -7172,9 +7212,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index 2e1bc1b5be5e..5394076f2bac 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -1123,7 +1123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# include "rtmutex-debug.h"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -429,9 +429,15 @@ static bool set_nr_if_polling(struct tas
+@@ -430,9 +430,15 @@ static bool set_nr_if_polling(struct tas
#endif
#endif
@@ -1141,7 +1141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -453,24 +459,32 @@ void wake_q_add(struct wake_q_head *head
+@@ -454,24 +460,32 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index 2d5f02176132..6e69c13f502e 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1723,6 +1726,23 @@ static __always_inline bool need_resched
+@@ -1731,6 +1734,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
index 5c7799afb0a4..f2cbcd6c6512 100644
--- a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2217,7 +2217,7 @@ EXPORT_SYMBOL(wake_up_process);
+@@ -2218,7 +2218,7 @@ EXPORT_SYMBOL(wake_up_process);
*/
int wake_up_lock_sleeper(struct task_struct *p)
{
diff --git a/patches/sched-let-softirq_count-return-0-if-inside-local_bh_.patch b/patches/sched-let-softirq_count-return-0-if-inside-local_bh_.patch
index 53ef5fc50d85..9df3b3e9443a 100644
--- a/patches/sched-let-softirq_count-return-0-if-inside-local_bh_.patch
+++ b/patches/sched-let-softirq_count-return-0-if-inside-local_bh_.patch
@@ -11,6 +11,7 @@ I don't see any fallout with this change. Furthermore, all checks like
"WARN_ON(!softirq_count())" will work and we can drop the workaround we
currently have in the queue.
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/preempt.h | 2 +-
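As the header above says, softirq_count() now tracks local_bh_disable() nesting on -RT, so softirq-context assertions behave as on mainline. A hedged sketch of the invariant (kernel C, not part of the queue; softirq_count_demo() is a made-up name):

  #include <linux/bottom_half.h>  /* local_bh_disable()/local_bh_enable() */
  #include <linux/bug.h>          /* WARN_ON() */
  #include <linux/preempt.h>      /* softirq_count() */

  static void softirq_count_demo(void)
  {
          local_bh_disable();
          /* softirq_count() reflects the BH-disable depth here, so a
           * mainline-style assertion no longer fires spuriously on -RT: */
          WARN_ON(!softirq_count());
          local_bh_enable();
  }
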
diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch
index 59f398ed4b2c..821b0de23764 100644
--- a/patches/sched-limit-nr-migrate.patch
+++ b/patches/sched-limit-nr-migrate.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -64,7 +64,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -65,7 +65,11 @@ const_debug unsigned int sysctl_sched_fe
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index d635635ba1a9..c488ef7bdf6f 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6137,7 +6137,7 @@ void __init sched_init(void)
+@@ -6138,7 +6138,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 2017307b6dd6..870344eb86a8 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2766,9 +2766,13 @@ static struct rq *finish_task_switch(str
+@@ -2767,9 +2767,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -5532,6 +5536,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5533,6 +5537,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5546,7 +5552,12 @@ void idle_task_exit(void)
+@@ -5547,7 +5553,12 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5849,6 +5860,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5850,6 +5861,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index c031949a6b35..0a72478a23d7 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1521,6 +1523,7 @@ extern struct task_struct *find_get_task
+@@ -1529,6 +1531,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2026,8 +2026,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2027,8 +2027,25 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2191,6 +2208,18 @@ int wake_up_process(struct task_struct *
+@@ -2192,6 +2209,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch b/patches/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch
new file mode 100644
index 000000000000..ba42577788a6
--- /dev/null
+++ b/patches/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 24 May 2018 15:26:48 +0200
+Subject: [PATCH] sched, tracing: Fix trace_sched_pi_setprio() for deboosting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since the following commit:
+
+ b91473ff6e97 ("sched,tracing: Update trace_sched_pi_setprio()")
+
+the sched_pi_setprio trace point shows the "newprio" during a deboost:
+
+ |futex sched_pi_setprio: comm=futex_requeue_p pid=2234 oldprio=98 newprio=98
+ |futex sched_switch: prev_comm=futex_requeue_p prev_pid=2234 prev_prio=120
+
+This patch open codes __rt_effective_prio() in the tracepoint as the
+'newprio' to get the old behaviour back / the correct priority:
+
+ |futex sched_pi_setprio: comm=futex_requeue_p pid=2220 oldprio=98 newprio=120
+ |futex sched_switch: prev_comm=futex_requeue_p prev_pid=2220 prev_prio=120
+
+Peter suggested to open code the new priority so people using tracehook
+could get the deadline data out.
+
+Reported-by: Mansky Christian <man@keba.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: b91473ff6e97 ("sched,tracing: Update trace_sched_pi_setprio()")
+Link: http://lkml.kernel.org/r/20180524132647.gg6ziuogczdmjjzu@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ include/trace/events/sched.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio,
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->oldprio = tsk->prio;
+- __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
++ __entry->newprio = pi_task ?
++ min(tsk->normal_prio, pi_task->prio) :
++ tsk->normal_prio;
+ /* XXX SCHED_DEADLINE bits missing */
+ ),
+
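To make the deboost case above concrete: when the boost is dropped, the tracepoint fires while tsk->prio still holds the boosted value, so the old "pi_task ? pi_task->prio : tsk->prio" expression reported 98 instead of 120. A standalone sketch of the before/after computation (plain C; the local variables stand in for the task fields and are illustrative only):

  #include <stdio.h>

  #define min(a, b) ((a) < (b) ? (a) : (b))

  int main(void)
  {
          /* Deboost snapshot from the log above: prio is still the
           * boosted 98 when the tracepoint fires, normal_prio is 120,
           * and pi_task is already NULL. */
          int prio = 98, normal_prio = 120;
          int *pi_prio = NULL;  /* stands in for pi_task->prio */

          int before = pi_prio ? *pi_prio : prio;
          int after  = pi_prio ? min(normal_prio, *pi_prio) : normal_prio;

          /* prints: old newprio=98, fixed newprio=120 */
          printf("old newprio=%d, fixed newprio=%d\n", before, after);
          return 0;
  }
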
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index 32afd2a4a96f..027fd3ae2a13 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2033,8 +2033,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2034,8 +2034,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 7e939030ef9c..f7395c9d4c58 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3463,8 +3463,10 @@ static void __sched notrace __schedule(b
+@@ -3464,8 +3464,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/seqlock-provide-the-same-ordering-semantics-as-mainl.patch b/patches/seqlock-provide-the-same-ordering-semantics-as-mainl.patch
new file mode 100644
index 000000000000..0b00532d1c3c
--- /dev/null
+++ b/patches/seqlock-provide-the-same-ordering-semantics-as-mainl.patch
@@ -0,0 +1,29 @@
+From: Julia Cartwright <julia@ni.com>
+Date: Thu, 26 Apr 2018 15:02:03 -0500
+Subject: [PATCH] seqlock: provide the same ordering semantics as mainline
+
+The mainline implementation of read_seqbegin() orders prior loads w.r.t.
+the read-side critical section. Fixup the RT writer-boosting
+implementation to provide the same guarantee.
+
+Also, while we're here, update the usage of ACCESS_ONCE() to use
+READ_ONCE().
+
+Fixes: e69f15cf77c23 ("seqlock: Prevent rt starvation")
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Julia Cartwright <julia@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -461,6 +461,7 @@ static inline unsigned read_seqbegin(seq
+ spin_unlock_wait(&sl->lock);
+ goto repeat;
+ }
++ smp_rmb();
+ return ret;
+ }
+ #endif
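The added smp_rmb() pairs the -RT reader with the writer the same way mainline's read_seqbegin() does: loads in the read-side critical section must not be reordered before the sequence sample, or a reader could accept a torn snapshot as consistent. A sketch of the reader pattern the barrier protects (hedged: the seqlock and data names are illustrative):

  #include <linux/seqlock.h>
  #include <linux/types.h>

  static seqlock_t cfg_lock;
  static struct { u64 a, b; } cfg;

  static void read_cfg(u64 *a, u64 *b)
  {
          unsigned int seq;

          do {
                  /* read_seqbegin() now implies smp_rmb() on -RT, so
                   * the loads below cannot be hoisted above the
                   * sequence sample. */
                  seq = read_seqbegin(&cfg_lock);
                  *a = cfg.a;
                  *b = cfg.b;
          } while (read_seqretry(&cfg_lock, seq));
  }
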
diff --git a/patches/series b/patches/series
index 2921f57b80e8..04b8a910b519 100644
--- a/patches/series
+++ b/patches/series
@@ -112,6 +112,8 @@ percpu_ida-Use-_irqsave-instead-of-local_irq_save-sp.patch
xen-9pfs-don-t-inclide-rwlock.h-directly.patch
ACPICA-provide-abstraction-for-raw_spinlock_t.patch
ACPICA-Convert-acpi_gbl_hardware-lock-back-to-an-acp.patch
+mfd-syscon-atmel-smc-include-string.h.patch
+sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch
############################################################
# POSTED by others
@@ -129,10 +131,13 @@ ACPICA-Convert-acpi_gbl_hardware-lock-back-to-an-acp.patch
############################################################
# POSTED
############################################################
-mfd-syscon-atmel-smc-include-string.h.patch
Revert-mm-vmstat.c-fix-vmstat_update-preemption-BUG.patch
arm-convert-boot-lock-to-raw.patch
x86-io-apic-migra-no-unmask.patch
+0001-PM-suspend-Prevent-might-sleep-splats.patch
+0002-PM-wakeup-Make-events_lock-a-RAW_SPINLOCK.patch
+0003-PM-s2idle-Make-s2idle_wait_head-swait-based.patch
+0004-PM-wakeup-Make-s2idle_lock-a-RAW_SPINLOCK.patch
############################################################
# Ready for posting
@@ -172,9 +177,6 @@ timekeeping-split-jiffies-lock.patch
# PTRACE/SIGNAL crap
signal-revert-ptrace-preempt-magic.patch
-# PM
-suspend-prevernt-might-sleep-splats.patch
-
# NETWORKING
net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -192,24 +194,21 @@ SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch
libata-remove-ata_sff_data_xfer_noirq.patch
posix-cpu-timers-remove-lockdep_assert_irqs_disabled.patch
usb-do-not-disable-interrupts-in-giveback.patch
-# refcount_dec_and_lock_irqsave
+# atomic_dec_and_lock_irqsave / refcount_dec_and_lock_irqsave
+0001-spinlock-atomic_dec_and_lock-Add-an-irqsave-variant.patch
+0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch
+0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch
0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
0002-userns-use-refcount_t-for-reference-counting-instead.patch
-0003-md-raid5-use-refcount_t-for-reference-counting-inste.patch
0004-locking-refcount-implement-refcount_dec_and_lock_irq.patch
0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch
-0007-md-raid5-Use-irqsave-variant-of-refcount_dec_and_loc.patch
-0008-md-raid5-Do-not-disable-irq-on-release_inactive_stri.patch
#iommu
iommu-amd-hide-unused-iommu_table_lock.patch
iommu-amd-Prevent-possible-null-pointer-dereference-.patch
iommu-amd-Cleanup-locking-in-__attach-detach_device.patch
iommu-amd-Do-not-flush-when-device-is-busy.patch
-#net
-lockdep-Add-a-assert_in_softirq.patch
-net-mac808211-rc-warn_on.patch
##################################################
# REAL RT STUFF starts here
@@ -445,6 +444,7 @@ debugobjects-rt.patch
# SEQLOCKS
seqlock-prevent-rt-starvation.patch
+seqlock-provide-the-same-ordering-semantics-as-mainl.patch
# NETWORKING
sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 7634a414acbb..adaf26c99641 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4746,7 +4746,7 @@ static void flush_backlog(struct work_st
+@@ -4743,7 +4743,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4756,11 +4756,14 @@ static void flush_backlog(struct work_st
+@@ -4753,11 +4753,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -5308,7 +5311,9 @@ static int process_backlog(struct napi_s
+@@ -5305,7 +5308,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5316,9 +5321,9 @@ static int process_backlog(struct napi_s
+@@ -5313,9 +5318,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5758,13 +5763,21 @@ static __latent_entropy void net_rx_acti
+@@ -5755,13 +5760,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8699,6 +8712,9 @@ static int dev_cpu_dead(unsigned int old
+@@ -8696,6 +8709,9 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -9004,8 +9020,9 @@ static int __init net_dev_init(void)
+@@ -9001,8 +9017,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 7eef3be97f53..9bdb430b82c2 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5270,12 +5273,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5267,12 +5270,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5353,6 +5358,7 @@ void __napi_schedule(struct napi_struct
+@@ -5350,6 +5355,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8677,6 +8683,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8674,6 +8680,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index e0d3f4b433f3..bf325b85a124 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -525,11 +525,14 @@ void resched_cpu(int cpu)
+@@ -526,11 +526,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -548,6 +551,8 @@ int get_nohz_timer_target(void)
+@@ -549,6 +552,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
rcu_read_unlock();
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 4f824abb29db..6b5d7dea78f1 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3217,10 +3217,8 @@ void serial8250_console_write(struct uar
+@@ -3218,10 +3218,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 1b0e95c35975..dbc0560f667e 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1726,10 +1726,6 @@ static inline void ttwu_activate(struct
+@@ -1727,10 +1727,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2170,56 +2166,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2171,56 +2167,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3485,21 +3431,6 @@ static void __sched notrace __schedule(b
+@@ -3486,21 +3432,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3574,6 +3505,14 @@ static inline void sched_submit_work(str
+@@ -3575,6 +3506,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3582,6 +3521,12 @@ static inline void sched_submit_work(str
+@@ -3583,6 +3522,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3592,6 +3537,7 @@ asmlinkage __visible void __sched schedu
+@@ -3593,6 +3538,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index c4b51e767694..b5c60e78ee99 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3549,9 +3549,8 @@ void __noreturn do_task_dead(void)
+@@ -3550,9 +3550,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3559,6 +3558,10 @@ static inline void sched_submit_work(str
+@@ -3560,6 +3559,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index bfd8ee06bab2..b254f2135da4 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6391,6 +6391,13 @@ int kvm_arch_init(void *opaque)
+@@ -6386,6 +6386,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 61f11b65a82a..237445ecc387 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
/*
-@@ -82,6 +89,7 @@ struct thread_info {
+@@ -83,6 +90,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -184,7 +184,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
-@@ -108,6 +116,7 @@ struct thread_info {
+@@ -110,6 +118,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -192,7 +192,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
-@@ -149,6 +158,8 @@ struct thread_info {
+@@ -151,6 +160,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)