author    | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2017-06-23 16:19:54 +0200
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2017-06-23 16:19:54 +0200
commit    | d5c61b75e5e186b88359d2d5a97cb860afedfe4b
tree      | e0107be368d0922bfcd392202ee0e38d8216d34e
parent    | 14ebc3f86b3b642d319b33c4bfc4525be2649710
download  | linux-rt-d5c61b75e5e186b88359d2d5a97cb860afedfe4b.tar.gz
[ANNOUNCE] v4.9.33-rt23
Dear RT folks!
I'm pleased to announce the v4.9.33-rt23 patch set.
Changes since v4.9.33-rt22:
- If a task's CPU mask was changed while the task was in a
  migrate_disable() section, not all of its scheduler hooks were run
  after migrate_enable(). The task also did not immediately switch
  CPUs if its new CPU mask no longer contained the CPU it was running
  on (see the first sketch after this list). Noticed while re-doing
  the migrate-disable code for v4.11 and also reported by Daniel
  Bristot.
- The removal of TASK_ALL in the last release uncovered a bug where we
  mixed normal wake-ups with wake-ups meant for waiters of sleeping
  spinlocks (see the second sketch after this list). Reported by Mike
  Galbraith.
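For the first fix, a minimal user-space sketch of the idea (the struct
and function names below are simplified stand-ins, not the kernel's
real API): while migration is disabled only the new mask is recorded,
and the deferred work, including a possible push off the current CPU,
runs in migrate_enable().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
struct task {
	unsigned long cpus_allowed;	/* bitmask of allowed CPUs */
	int cpu;			/* CPU the task currently runs on */
	int migrate_disable;		/* > 0: pinned to the current CPU */
	bool migrate_disable_update;	/* mask changed while pinned */
};

static void set_cpus_allowed(struct task *p, unsigned long new_mask)
{
	p->cpus_allowed = new_mask;
	if (p->migrate_disable) {
		/* Pinned: only record the change, defer the real work. */
		p->migrate_disable_update = true;
		return;
	}
	/* ...the dequeue/enqueue scheduler hooks would run here... */
}

static void migrate_enable(struct task *p)
{
	if (--p->migrate_disable)
		return;
	if (p->migrate_disable_update) {
		p->migrate_disable_update = false;
		/* ...run the scheduler hooks that were deferred above... */
		if (!(p->cpus_allowed & (1UL << p->cpu)))
			printf("push task off CPU %d now\n", p->cpu);
	}
}

int main(void)
{
	struct task t = { .cpus_allowed = 0x3, .cpu = 0, .migrate_disable = 1 };

	set_cpus_allowed(&t, 0x2);	/* CPU 0 is no longer allowed */
	migrate_enable(&t);		/* the deferred update happens here */
	return 0;
}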
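For the second fix, a compressed user-space rendering of the
data-structure change (again simplified stand-ins; the kernel version
does the "already queued" check with cmpxchg() and resolves tasks via
container_of()): each task carries a second queue node, so being
queued for a sleeper wake-up no longer blocks a concurrent normal
wake_q_add().

#include <stdbool.h>
#include <stddef.h>

struct wake_q_node { struct wake_q_node *next; };

/* One node per wake-up kind, because a node can sit on only one queue. */
struct task {
	struct wake_q_node wake_q;		/* normal wake-ups */
	struct wake_q_node wake_q_sleeper;	/* rtmutex sleeper wake-ups */
};

#define WAKE_Q_TAIL ((struct wake_q_node *)0x01)

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

static void wake_q_init(struct wake_q_head *h)
{
	h->first = WAKE_Q_TAIL;
	h->lastp = &h->first;
}

static void __wake_q_add(struct wake_q_head *h, struct task *t, bool sleeper)
{
	struct wake_q_node *node = sleeper ? &t->wake_q_sleeper : &t->wake_q;

	if (node->next)		/* already queued on some wake_q */
		return;
	node->next = WAKE_Q_TAIL;
	*h->lastp = node;
	h->lastp = &node->next;
}

int main(void)
{
	struct wake_q_head normal_q, sleeper_q;
	struct task t = { { NULL }, { NULL } };

	wake_q_init(&normal_q);
	wake_q_init(&sleeper_q);

	__wake_q_add(&sleeper_q, &t, true);	/* queued as a sleeper... */
	__wake_q_add(&normal_q, &t, false);	/* ...no longer blocks this */
	return 0;
}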
Known issues
- CPU hotplug got a little better but it can still deadlock.
The delta patch against v4.9.33-rt22 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/incr/patch-4.9.33-rt22-rt23.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9.33-rt23
The RT patch against v4.9.33 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9.33-rt23.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.33-rt23.tar.xz
Sebastian
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1014,8 +1014,20 @@ struct wake_q_head {
#define WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
-extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
+extern void __wake_q_add(struct wake_q_head *head,
+ struct task_struct *task, bool sleeper);
+static inline void wake_q_add(struct wake_q_head *head,
+ struct task_struct *task)
+{
+ __wake_q_add(head, task, false);
+}
+
+static inline void wake_q_add_sleeper(struct wake_q_head *head,
+ struct task_struct *task)
+{
+ __wake_q_add(head, task, true);
+}
+
extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
static inline void wake_up_q(struct wake_q_head *head)
@@ -1535,6 +1547,7 @@ struct task_struct {
unsigned int policy;
#ifdef CONFIG_PREEMPT_RT_FULL
int migrate_disable;
+ int migrate_disable_update;
# ifdef CONFIG_SCHED_DEBUG
int migrate_disable_atomic;
# endif
@@ -1745,6 +1758,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
+ struct wake_q_node wake_q_sleeper;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -558,6 +558,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
+ tsk->wake_q_sleeper.next = NULL;
account_kernel_stack(tsk, 1);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1506,7 +1506,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
*/
preempt_disable();
if (waiter->savestate)
- wake_q_add(wake_sleeper_q, waiter->task);
+ wake_q_add_sleeper(wake_sleeper_q, waiter->task);
else
wake_q_add(wake_q, waiter->task);
raw_spin_unlock(&current->pi_lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -430,9 +430,15 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
+ bool sleeper)
{
- struct wake_q_node *node = &task->wake_q;
+ struct wake_q_node *node;
+
+ if (sleeper)
+ node = &task->wake_q_sleeper;
+ else
+ node = &task->wake_q;
/*
* Atomically grab the task, if ->wake_q is !nil already it means
@@ -461,11 +467,17 @@ void __wake_up_q(struct wake_q_head *head, bool sleeper)
while (node != WAKE_Q_TAIL) {
struct task_struct *task;
- task = container_of(node, struct task_struct, wake_q);
+ if (sleeper)
+ task = container_of(node, struct task_struct, wake_q_sleeper);
+ else
+ task = container_of(node, struct task_struct, wake_q);
BUG_ON(!task);
/* task can safely be re-inserted now */
node = node->next;
- task->wake_q.next = NULL;
+ if (sleeper)
+ task->wake_q_sleeper.next = NULL;
+ else
+ task->wake_q.next = NULL;
/*
* wake_up_process() implies a wmb() to pair with the queueing
@@ -1138,18 +1150,14 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void __do_set_cpus_allowed_tail(struct task_struct *p,
+ const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
bool queued, running;
lockdep_assert_held(&p->pi_lock);
- if (__migrate_disabled(p)) {
- cpumask_copy(&p->cpus_allowed, new_mask);
- return;
- }
-
queued = task_on_rq_queued(p);
running = task_current(rq, p);
@@ -1172,6 +1180,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
set_curr_task(rq, p);
}
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ if (__migrate_disabled(p)) {
+ lockdep_assert_held(&p->pi_lock);
+
+ cpumask_copy(&p->cpus_allowed, new_mask);
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+ p->migrate_disable_update = 1;
+#endif
+ return;
+ }
+ __do_set_cpus_allowed_tail(p, new_mask);
+}
+
static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
static DEFINE_MUTEX(sched_down_mutex);
static cpumask_t sched_down_cpumask;
@@ -3435,6 +3457,43 @@ void migrate_enable(void)
*/
p->migrate_disable = 0;
+ if (p->migrate_disable_update) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+
+ __do_set_cpus_allowed_tail(p, &p->cpus_allowed);
+ task_rq_unlock(rq, p, &rf);
+
+ p->migrate_disable_update = 0;
+
+ WARN_ON(smp_processor_id() != task_cpu(p));
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) {
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ struct migration_arg arg;
+ unsigned int dest_cpu;
+
+ if (p->flags & PF_KTHREAD) {
+ /*
+ * Kernel threads are allowed on online && !active CPUs
+ */
+ cpu_valid_mask = cpu_online_mask;
+ }
+ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_allowed);
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ unpin_current_cpu();
+ preempt_lazy_enable();
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ return;
+ }
+ }
+
unpin_current_cpu();
preempt_enable();
preempt_lazy_enable();
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt22
+-rt23
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
29 files changed, 386 insertions, 126 deletions

Diffstat:

  patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
  patches/arch-arm64-Add-lazy-preempt-support.patch
  patches/cgroups-use-simple-wait-in-css_release.patch
  patches/completion-use-simple-wait-queues.patch
  patches/cpu-rt-rework-cpu-down.patch
  patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
  patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
  patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
  patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch (new)
  patches/localversion.patch
  patches/mm-convert-swap-to-percpu-locked.patch
  patches/mm-enable-slub.patch
  patches/mm-page_alloc-reduce-lock-sections-further.patch
  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
  patches/mm-rt-kmap-atomic-scheduling.patch
  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
  patches/net-prevent-abba-deadlock.patch
  patches/perf-make-swevent-hrtimer-irqsafe.patch
  patches/preempt-lazy-support.patch
  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
  patches/sched-migrate-disable-handle-updated-task-mask-mg-di.patch (new)
  patches/seqlock-prevent-rt-starvation.patch
  patches/series
  patches/skbufhead-raw-lock.patch
  patches/slub-enable-irqs-for-no-wait.patch
  patches/workqueue-distangle-from-rq-lock.patch
  patches/workqueue-prevent-deadlock-stall.patch
  patches/x86-mce-timer-hrtimer.patch
  patches/x86-mce-use-swait-queue-for-mce-wakeups.patch

The two patches added to the queue carry the delta hunks shown above;
their changelogs:

patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch:

  From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  Date: Thu, 22 Jun 2017 17:53:34 +0200
  Subject: [PATCH] kernel/locking: use an exclusive wait_q for sleepers

  If a task is queued as a sleeper for a wakeup and never goes to
  schedule() (because it just obtained the lock) then it will receive a
  spurious wake up, which is not "bad"; it is expected. Until that wake
  up happens this task cannot be enqueued for any wake ups handled by
  the WAKE_Q infrastructure (because a task can only be enqueued once).
  This wouldn't be a problem if we used the same wakeup mechanism for
  the wake up of sleepers as we do for "normal" wake ups. But we don't…

  So.
   T1                             T2                  T3
   spin_lock(x)                                       spin_unlock(x);
                                                      wake_q_add_sleeper(q1, T1)
   spin_unlock(x)
   set_state(TASK_INTERRUPTIBLE)
   if (!condition)
       schedule()
                                  condition = true
                                  wake_q_add(q2, T1)
                                  // T1 not added, still enqueued
                                  wake_up_q(q2)
                                                      wake_up_q_sleeper(q1)
                                                      // T1 not woken up, wrong task state

  In order to solve this race this patch adds a wake_q_node for the
  sleeper case.

  Reported-by: Mike Galbraith <efault@gmx.de>
  Cc: stable-rt@vger.kernel.org
  Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

patches/sched-migrate-disable-handle-updated-task-mask-mg-di.patch:

  From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  Date: Mon, 19 Jun 2017 09:55:47 +0200
  Subject: [PATCH] sched/migrate disable: handle updated task-mask mg-dis section

  If a task's cpumask changes while the task is in a migrate_disable()
  section then we don't react to it after migrate_enable(). It matters
  however if the current CPU is no longer part of the cpumask. We also
  miss the ->set_cpus_allowed() callback.
  This patch fixes it by setting task->migrate_disable_update so that
  this "delayed" hook runs once migration is enabled again.
  This bug was introduced while fixing an unrelated issue in
  migrate_disable() in v4.4-rt3 (update_migrate_disable() was removed
  during that).

  Cc: stable-rt@vger.kernel.org
  Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>