-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 4
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 4
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 2
-rw-r--r--  patches/kthread-convert-worker-lock-to-raw-spinlock.patch | 194
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch | 91
-rw-r--r--  patches/preempt-lazy-support.patch | 18
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 6
-rw-r--r--  patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r--  patches/rtmutex-annotate-sleeping-lock-context.patch | 2
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch | 31
-rw-r--r--  patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 8
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 8
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 4
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r--  patches/series | 5
-rw-r--r--  patches/tty-serial-pl011-warning-about-uninitialized.patch | 37
-rw-r--r--  patches/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch | 74
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch | 12
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 4
21 files changed, 474 insertions, 42 deletions
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 8bc26af7db43..ff615f7c1816 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7124,7 +7124,10 @@ void migrate_disable(void)
+@@ -7125,7 +7125,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7154,7 +7157,10 @@ void migrate_enable(void)
+@@ -7155,7 +7158,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 6168096ecef7..31fd234b8669 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7219,6 +7219,7 @@ void migrate_disable(void)
+@@ -7220,6 +7220,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7284,12 +7285,15 @@ void migrate_enable(void)
+@@ -7285,12 +7286,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index bef20c12b1aa..1e99f118fbc9 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2764,15 +2764,6 @@ static struct rq *finish_task_switch(str
+@@ -2765,15 +2765,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/kthread-convert-worker-lock-to-raw-spinlock.patch b/patches/kthread-convert-worker-lock-to-raw-spinlock.patch
new file mode 100644
index 000000000000..ffd7d27bb5d7
--- /dev/null
+++ b/patches/kthread-convert-worker-lock-to-raw-spinlock.patch
@@ -0,0 +1,194 @@
+From: Julia Cartwright <julia@ni.com>
+Date: Fri, 28 Sep 2018 21:03:51 +0000
+Subject: [PATCH] kthread: convert worker lock to raw spinlock
+
+In order to enable the queuing of kthread work items from hardirq
+context even when PREEMPT_RT_FULL is enabled, convert the worker
+spin_lock to a raw_spin_lock.
+
+This is only acceptable because the work performed under the lock
+is well-bounded and minimal.
+
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
+Reported-by: Tim Sander <tim@krieglstein.org>
+Signed-off-by: Julia Cartwright <julia@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kthread.h | 2 +-
+ kernel/kthread.c | 42 +++++++++++++++++++++---------------------
+ 2 files changed, 22 insertions(+), 22 deletions(-)
+
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -85,7 +85,7 @@ enum {
+
+ struct kthread_worker {
+ unsigned int flags;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct list_head work_list;
+ struct list_head delayed_work_list;
+ struct task_struct *task;
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -597,7 +597,7 @@ void __kthread_init_worker(struct kthrea
+ struct lock_class_key *key)
+ {
+ memset(worker, 0, sizeof(struct kthread_worker));
+- spin_lock_init(&worker->lock);
++ raw_spin_lock_init(&worker->lock);
+ lockdep_set_class_and_name(&worker->lock, key, name);
+ INIT_LIST_HEAD(&worker->work_list);
+ INIT_LIST_HEAD(&worker->delayed_work_list);
+@@ -639,21 +639,21 @@ int kthread_worker_fn(void *worker_ptr)
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+- spin_lock_irq(&worker->lock);
++ raw_spin_lock_irq(&worker->lock);
+ worker->task = NULL;
+- spin_unlock_irq(&worker->lock);
++ raw_spin_unlock_irq(&worker->lock);
+ return 0;
+ }
+
+ work = NULL;
+- spin_lock_irq(&worker->lock);
++ raw_spin_lock_irq(&worker->lock);
+ if (!list_empty(&worker->work_list)) {
+ work = list_first_entry(&worker->work_list,
+ struct kthread_work, node);
+ list_del_init(&work->node);
+ }
+ worker->current_work = work;
+- spin_unlock_irq(&worker->lock);
++ raw_spin_unlock_irq(&worker->lock);
+
+ if (work) {
+ __set_current_state(TASK_RUNNING);
+@@ -810,12 +810,12 @@ bool kthread_queue_work(struct kthread_w
+ bool ret = false;
+ unsigned long flags;
+
+- spin_lock_irqsave(&worker->lock, flags);
++ raw_spin_lock_irqsave(&worker->lock, flags);
+ if (!queuing_blocked(worker, work)) {
+ kthread_insert_work(worker, work, &worker->work_list);
+ ret = true;
+ }
+- spin_unlock_irqrestore(&worker->lock, flags);
++ raw_spin_unlock_irqrestore(&worker->lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_queue_work);
+@@ -841,7 +841,7 @@ void kthread_delayed_work_timer_fn(struc
+ if (WARN_ON_ONCE(!worker))
+ return;
+
+- spin_lock(&worker->lock);
++ raw_spin_lock(&worker->lock);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+@@ -850,7 +850,7 @@ void kthread_delayed_work_timer_fn(struc
+ list_del_init(&work->node);
+ kthread_insert_work(worker, work, &worker->work_list);
+
+- spin_unlock(&worker->lock);
++ raw_spin_unlock(&worker->lock);
+ }
+ EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+
+@@ -906,14 +906,14 @@ bool kthread_queue_delayed_work(struct k
+ unsigned long flags;
+ bool ret = false;
+
+- spin_lock_irqsave(&worker->lock, flags);
++ raw_spin_lock_irqsave(&worker->lock, flags);
+
+ if (!queuing_blocked(worker, work)) {
+ __kthread_queue_delayed_work(worker, dwork, delay);
+ ret = true;
+ }
+
+- spin_unlock_irqrestore(&worker->lock, flags);
++ raw_spin_unlock_irqrestore(&worker->lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
+@@ -949,7 +949,7 @@ void kthread_flush_work(struct kthread_w
+ if (!worker)
+ return;
+
+- spin_lock_irq(&worker->lock);
++ raw_spin_lock_irq(&worker->lock);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+@@ -961,7 +961,7 @@ void kthread_flush_work(struct kthread_w
+ else
+ noop = true;
+
+- spin_unlock_irq(&worker->lock);
++ raw_spin_unlock_irq(&worker->lock);
+
+ if (!noop)
+ wait_for_completion(&fwork.done);
+@@ -994,9 +994,9 @@ static bool __kthread_cancel_work(struct
+ * any queuing is blocked by setting the canceling counter.
+ */
+ work->canceling++;
+- spin_unlock_irqrestore(&worker->lock, *flags);
++ raw_spin_unlock_irqrestore(&worker->lock, *flags);
+ del_timer_sync(&dwork->timer);
+- spin_lock_irqsave(&worker->lock, *flags);
++ raw_spin_lock_irqsave(&worker->lock, *flags);
+ work->canceling--;
+ }
+
+@@ -1043,7 +1043,7 @@ bool kthread_mod_delayed_work(struct kth
+ unsigned long flags;
+ int ret = false;
+
+- spin_lock_irqsave(&worker->lock, flags);
++ raw_spin_lock_irqsave(&worker->lock, flags);
+
+ /* Do not bother with canceling when never queued. */
+ if (!work->worker)
+@@ -1060,7 +1060,7 @@ bool kthread_mod_delayed_work(struct kth
+ fast_queue:
+ __kthread_queue_delayed_work(worker, dwork, delay);
+ out:
+- spin_unlock_irqrestore(&worker->lock, flags);
++ raw_spin_unlock_irqrestore(&worker->lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
+@@ -1074,7 +1074,7 @@ static bool __kthread_cancel_work_sync(s
+ if (!worker)
+ goto out;
+
+- spin_lock_irqsave(&worker->lock, flags);
++ raw_spin_lock_irqsave(&worker->lock, flags);
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+@@ -1088,13 +1088,13 @@ static bool __kthread_cancel_work_sync(s
+ * In the meantime, block any queuing by setting the canceling counter.
+ */
+ work->canceling++;
+- spin_unlock_irqrestore(&worker->lock, flags);
++ raw_spin_unlock_irqrestore(&worker->lock, flags);
+ kthread_flush_work(work);
+- spin_lock_irqsave(&worker->lock, flags);
++ raw_spin_lock_irqsave(&worker->lock, flags);
+ work->canceling--;
+
+ out_fast:
+- spin_unlock_irqrestore(&worker->lock, flags);
++ raw_spin_unlock_irqrestore(&worker->lock, flags);
+ out:
+ return ret;
+ }
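
As a note on what the conversion above enables: with worker->lock a raw_spinlock_t, kthread work can be queued from a hardirq handler even on PREEMPT_RT_FULL, where ordinary spinlocks sleep. The following minimal sketch illustrates that use case; it is not part of the patch series, and the my_dev, my_irq_handler and my_work_fn names are invented for illustration only.

#include <linux/interrupt.h>
#include <linux/kthread.h>

struct my_dev {
	struct kthread_worker *worker;	/* from kthread_create_worker() */
	struct kthread_work work;	/* set up via kthread_init_work(&work, my_work_fn) */
};

/* Runs later in the dedicated worker thread; may sleep there. */
static void my_work_fn(struct kthread_work *work)
{
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * kthread_queue_work() takes worker->lock internally; with the lock
	 * converted to a raw_spinlock_t it no longer sleeps, so queuing from
	 * hardirq context is legal on an RT kernel as well.
	 */
	kthread_queue_work(dev->worker, &dev->work);
	return IRQ_HANDLED;
}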
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4c1841b6475d..bbb08330835d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt7
diff --git a/patches/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/patches/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch
new file mode 100644
index 000000000000..6172cac6fad0
--- /dev/null
+++ b/patches/mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch
@@ -0,0 +1,91 @@
+From: Clark Williams <williams@redhat.com>
+Date: Tue, 18 Sep 2018 10:29:31 -0500
+Subject: [PATCH] mm/kasan: make quarantine_lock a raw_spinlock_t
+
+The static lock quarantine_lock is used in quarantine.c to protect the
+quarantine queue data structures. It is taken inside quarantine queue
+manipulation routines (quarantine_put(), quarantine_reduce() and
+quarantine_remove_cache()), with IRQs disabled.
+This is not a problem on a stock kernel but is problematic on an RT
+kernel, where spinlocks are sleeping locks and therefore cannot be
+acquired with interrupts disabled.
+
+Convert the quarantine_lock to a raw_spinlock_t. The usage of
+quarantine_lock is confined to quarantine.c and the work performed while
+the lock is held is limited.
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/kasan/quarantine.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/mm/kasan/quarantine.c
++++ b/mm/kasan/quarantine.c
+@@ -103,7 +103,7 @@ static int quarantine_head;
+ static int quarantine_tail;
+ /* Total size of all objects in global_quarantine across all batches. */
+ static unsigned long quarantine_size;
+-static DEFINE_SPINLOCK(quarantine_lock);
++static DEFINE_RAW_SPINLOCK(quarantine_lock);
+ DEFINE_STATIC_SRCU(remove_cache_srcu);
+
+ /* Maximum size of the global queue. */
+@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_me
+ if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
+ qlist_move_all(q, &temp);
+
+- spin_lock(&quarantine_lock);
++ raw_spin_lock(&quarantine_lock);
+ WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
+ qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
+ if (global_quarantine[quarantine_tail].bytes >=
+@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_me
+ if (new_tail != quarantine_head)
+ quarantine_tail = new_tail;
+ }
+- spin_unlock(&quarantine_lock);
++ raw_spin_unlock(&quarantine_lock);
+ }
+
+ local_irq_restore(flags);
+@@ -230,7 +230,7 @@ void quarantine_reduce(void)
+ * expected case).
+ */
+ srcu_idx = srcu_read_lock(&remove_cache_srcu);
+- spin_lock_irqsave(&quarantine_lock, flags);
++ raw_spin_lock_irqsave(&quarantine_lock, flags);
+
+ /*
+ * Update quarantine size in case of hotplug. Allocate a fraction of
+@@ -254,7 +254,7 @@ void quarantine_reduce(void)
+ quarantine_head = 0;
+ }
+
+- spin_unlock_irqrestore(&quarantine_lock, flags);
++ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
+
+ qlist_free_all(&to_free, NULL);
+ srcu_read_unlock(&remove_cache_srcu, srcu_idx);
+@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem
+ */
+ on_each_cpu(per_cpu_remove_cache, cache, 1);
+
+- spin_lock_irqsave(&quarantine_lock, flags);
++ raw_spin_lock_irqsave(&quarantine_lock, flags);
+ for (i = 0; i < QUARANTINE_BATCHES; i++) {
+ if (qlist_empty(&global_quarantine[i]))
+ continue;
+ qlist_move_cache(&global_quarantine[i], &to_free, cache);
+ /* Scanning whole quarantine can take a while. */
+- spin_unlock_irqrestore(&quarantine_lock, flags);
++ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
+ cond_resched();
+- spin_lock_irqsave(&quarantine_lock, flags);
++ raw_spin_lock_irqsave(&quarantine_lock, flags);
+ }
+- spin_unlock_irqrestore(&quarantine_lock, flags);
++ raw_spin_unlock_irqrestore(&quarantine_lock, flags);
+
+ qlist_free_all(&to_free, cache);
+
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index aaa7764da869..16a7eb3da880 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2428,6 +2470,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2429,6 +2471,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3493,6 +3538,7 @@ static void __sched notrace __schedule(b
+@@ -3494,6 +3539,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3673,6 +3719,30 @@ static void __sched notrace preempt_sche
+@@ -3674,6 +3720,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3687,7 +3757,8 @@ asmlinkage __visible void __sched notrac
+@@ -3688,7 +3758,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3714,6 +3785,9 @@ asmlinkage __visible void __sched notrac
+@@ -3715,6 +3786,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5482,7 +5556,9 @@ void init_idle(struct task_struct *idle,
+@@ -5483,7 +5557,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7198,6 +7274,7 @@ void migrate_disable(void)
+@@ -7199,6 +7275,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7265,6 +7342,7 @@ void migrate_enable(void)
+@@ -7266,6 +7343,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7273,6 +7351,7 @@ void migrate_enable(void)
+@@ -7274,6 +7352,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 7b0e3bcb7690..40d3b5ad2429 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1367,6 +1367,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1368,6 +1368,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1411,7 +1423,7 @@ unsigned long wait_task_inactive(struct
+@@ -1412,7 +1424,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1426,7 +1438,8 @@ unsigned long wait_task_inactive(struct
+@@ -1427,7 +1439,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index ba90c0285945..c87657adf043 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7155,6 +7155,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7156,6 +7156,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7178,10 +7219,9 @@ void migrate_disable(void)
+@@ -7179,10 +7220,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7213,9 +7253,8 @@ void migrate_enable(void)
+@@ -7214,9 +7254,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index a8744ae31c3a..dd3f0f2103e6 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7324,4 +7324,49 @@ void migrate_enable(void)
+@@ -7325,4 +7325,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index d7d1ba8c777b..1378bd3ba12c 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6181,7 +6181,7 @@ void __init sched_init(void)
+@@ -6182,7 +6182,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
new file mode 100644
index 000000000000..1b119a9a8a48
--- /dev/null
+++ b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
@@ -0,0 +1,31 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Oct 2018 17:34:50 +0200
+Subject: [PATCH] sched/migrate_disable: Add export_symbol_gpl for
+ __migrate_disabled
+
+Jonathan reported that lttng/modules can't use __migrate_disabled().
+This function is only used by sched/core itself and the tracing
+infrastructure to report the migrate counter (lttng probably does the
+same). Since the rework of migrate_disable() it moved from sched.h to
+preempt.h and became an exported function instead of a "static
+inline", due to the header recursion of preempt vs sched.
+
+Since the compiler inlines the function for sched/core usage, add an
+EXPORT_SYMBOL_GPL to allow the module/LTTNG usage.
+
+Reported-by: Jonathan Rajott <jonathan.rajotte-julien@efficios.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1035,6 +1035,7 @@ int __migrate_disabled(struct task_struc
+ {
+ return p->migrate_disable;
+ }
++EXPORT_SYMBOL_GPL(__migrate_disabled);
+ #endif
+
+ static void __do_set_cpus_allowed_tail(struct task_struct *p,
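
For context on the export added above, this is roughly how an out-of-tree module such as lttng-modules could read the migrate-disable counter once __migrate_disabled() carries EXPORT_SYMBOL_GPL. It is a hedged sketch only, assuming an RT-patched kernel where the declaration lives in linux/preempt.h; the module and function names are made up for the example.

#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int __init md_sample_init(void)
{
	/* 0 while migration is allowed, >0 inside a migrate_disable() section */
	pr_info("migrate_disable depth of current task: %d\n",
		__migrate_disabled(current));
	return 0;
}

static void __exit md_sample_exit(void)
{
}

module_init(md_sample_init);
module_exit(md_sample_exit);
/* A GPL-compatible license is required to link against the GPL-only export. */
MODULE_LICENSE("GPL");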
diff --git a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
index 82cee01aa405..2d2120ec40c5 100644
--- a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
+++ b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
-@@ -1092,7 +1092,7 @@ static void __do_set_cpus_allowed_tail(s
+@@ -1093,7 +1093,7 @@ static void __do_set_cpus_allowed_tail(s
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
-@@ -1165,7 +1165,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1166,7 +1166,7 @@ static int __set_cpus_allowed_ptr(struct
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
-@@ -7180,7 +7180,7 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7181,7 +7181,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
update_nr_migratory(struct task_struct *p, long delta)
-@@ -7328,45 +7328,44 @@ EXPORT_SYMBOL(migrate_enable);
+@@ -7329,45 +7329,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 7d618320898a..da5079f3a077 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2752,9 +2752,13 @@ static struct rq *finish_task_switch(str
+@@ -2753,9 +2753,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -5580,6 +5584,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5581,6 +5585,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5595,7 +5601,11 @@ void idle_task_exit(void)
+@@ -5596,7 +5602,11 @@ void idle_task_exit(void)
current->active_mm = &init_mm;
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5912,6 +5922,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5913,6 +5923,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index bf48bc4f892f..16a715a4f9e4 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2022,8 +2022,27 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2023,8 +2023,27 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2187,6 +2206,18 @@ int wake_up_process(struct task_struct *
+@@ -2188,6 +2207,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index b1269f5db4ce..61eb5a91c096 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3519,8 +3519,10 @@ static void __sched notrace __schedule(b
+@@ -3520,8 +3520,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/series b/patches/series
index 41df8ad05f43..1a1217951ff8 100644
--- a/patches/series
+++ b/patches/series
@@ -45,6 +45,7 @@ irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch
0006-ARM-configs-at91-unselect-PIT.patch
irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
+kthread-convert-worker-lock-to-raw-spinlock.patch
############################################################
# POSTED
@@ -73,6 +74,7 @@ efi-Allow-efi-runtime.patch
x86-efi-drop-task_lock-from-efi_switch_mm.patch
arm64-KVM-compute_layout-before-altenates-are-applie.patch
of-allocate-free-phandle-cache-outside-of-the-devtre.patch
+mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch
###############################################################
# Stuff broken upstream and upstream wants something different
@@ -80,6 +82,7 @@ of-allocate-free-phandle-cache-outside-of-the-devtre.patch
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
add_migrate_disable.patch
+sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -283,6 +286,7 @@ rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
# DRIVERS SERIAL
drivers-tty-fix-omap-lock-crap.patch
drivers-tty-pl011-irq-disable-madness.patch
+tty-serial-pl011-warning-about-uninitialized.patch
rt-serial-warn-fix.patch
tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -416,6 +420,7 @@ drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch
tpm_tis-fix-stall-after-iowrite-s.patch
+watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
# I915
drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
diff --git a/patches/tty-serial-pl011-warning-about-uninitialized.patch b/patches/tty-serial-pl011-warning-about-uninitialized.patch
new file mode 100644
index 000000000000..76f39fd86f84
--- /dev/null
+++ b/patches/tty-serial-pl011-warning-about-uninitialized.patch
@@ -0,0 +1,37 @@
+From: Kurt Kanzenbach <kurt@linutronix.de>
+Date: Mon, 24 Sep 2018 10:29:01 +0200
+Subject: [PATCH] tty: serial: pl011: explicitly initialize the flags variable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Silence the following gcc warning:
+
+drivers/tty/serial/amba-pl011.c: In function ‘pl011_console_write’:
+./include/linux/spinlock.h:260:3: warning: ‘flags’ may be used uninitialized in this function [-Wmaybe-uninitialized]
+ _raw_spin_unlock_irqrestore(lock, flags); \
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~~
+drivers/tty/serial/amba-pl011.c:2214:16: note: ‘flags’ was declared here
+ unsigned long flags;
+ ^~~~~
+
+The code is correct. Thus, initializing flags to zero doesn't change the
+behavior and resolves the warning.
+
+Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/amba-pl011.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2211,7 +2211,7 @@ pl011_console_write(struct console *co,
+ {
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0, new_cr;
+- unsigned long flags;
++ unsigned long flags = 0;
+ int locked = 1;
+
+ clk_enable(uap->clk);
diff --git a/patches/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/patches/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
new file mode 100644
index 000000000000..2e954cf7d3c1
--- /dev/null
+++ b/patches/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
@@ -0,0 +1,74 @@
+From: Julia Cartwright <julia@ni.com>
+Date: Fri, 28 Sep 2018 21:03:51 +0000
+Subject: [PATCH] watchdog: prevent deferral of watchdogd wakeup on RT
+
+When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are
+deferred for execution into the context of ktimersoftd unless otherwise
+annotated.
+
+Deferring the expiry of the hrtimer used by the watchdog core, however,
+is a waste, as the callback does nothing but queue a kthread work item
+and wake up watchdogd.
+
+It's worse than that, too: the deferral through ktimersoftd also means
+that for correct behavior a user must adjust the scheduling parameters
+of both watchdogd _and_ ktimersoftd, which is unnecessary and has other
+side effects (like causing unrelated expiry functions to execute at
+potentially elevated priority).
+
+Instead, mark the hrtimer used by the watchdog core as being _HARD to
+allow its execution directly from hardirq context. The work done in
+this expiry function is well-bounded and minimal.
+
+A user still must adjust the scheduling parameters of the watchdogd
+to be correct w.r.t. their application needs.
+
+Cc: Guenter Roeck <linux@roeck-us.net>
+Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
+Reported-by: Tim Sander <tim@krieglstein.org>
+Signed-off-by: Julia Cartwright <julia@ni.com>
+Acked-by: Guenter Roeck <linux@roeck-us.net>
+[bigeasy: use only HRTIMER_MODE_REL_HARD]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/watchdog/watchdog_dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -147,7 +147,7 @@ static inline void watchdog_update_worke
+ ktime_t t = watchdog_next_keepalive(wdd);
+
+ if (t > 0)
+- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
++ hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD);
+ } else {
+ hrtimer_cancel(&wd_data->timer);
+ }
+@@ -166,7 +166,7 @@ static int __watchdog_ping(struct watchd
+ if (ktime_after(earliest_keepalive, now)) {
+ hrtimer_start(&wd_data->timer,
+ ktime_sub(earliest_keepalive, now),
+- HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_HARD);
+ return 0;
+ }
+
+@@ -945,7 +945,7 @@ static int watchdog_cdev_register(struct
+ return -ENODEV;
+
+ kthread_init_work(&wd_data->work, watchdog_ping_work);
+- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ wd_data->timer.function = watchdog_timer_expired;
+
+ if (wdd->id == 0) {
+@@ -992,7 +992,7 @@ static int watchdog_cdev_register(struct
+ __module_get(wdd->ops->owner);
+ kref_get(&wd_data->kref);
+ if (handle_boot_enabled)
+- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
++ hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD);
+ else
+ pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
+ wdd->id);
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 8bf270fc3348..5771ad67d29b 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1722,10 +1722,6 @@ static inline void ttwu_activate(struct
+@@ -1723,10 +1723,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2166,56 +2162,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2167,56 +2163,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3541,21 +3487,6 @@ static void __sched notrace __schedule(b
+@@ -3542,21 +3488,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3615,6 +3546,20 @@ static inline void sched_submit_work(str
+@@ -3616,6 +3547,20 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3623,6 +3568,12 @@ static inline void sched_submit_work(str
+@@ -3624,6 +3569,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3633,6 +3584,7 @@ asmlinkage __visible void __sched schedu
+@@ -3634,6 +3585,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 54f98dffbe13..ebc2c54051ff 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3590,9 +3590,8 @@ void __noreturn do_task_dead(void)
+@@ -3591,9 +3591,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3606,6 +3605,9 @@ static inline void sched_submit_work(str
+@@ -3607,6 +3606,9 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}