Diffstat (limited to 'patches')
-rw-r--r--  patches/add_migrate_disable.patch | 8
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 4
-rw-r--r--  patches/fs-aio-simple-simple-work.patch | 8
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 2
-rw-r--r--  patches/futex-workaround-migrate_disable-enable-in-different.patch | 4
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 4
-rw-r--r--  patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 4
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 6
-rw-r--r--  patches/hrtimers-prepare-full-preemption.patch | 14
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 58
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch | 39
-rw-r--r--  patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch | 2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch | 2
-rw-r--r--  patches/mm-disable-sloub-rt.patch | 4
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 4
-rw-r--r--  patches/panic-disable-random-on-rt.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 30
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 6
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 4
-rw-r--r--  patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r--  patches/rtmutex-add-sleeping-lock-implementation.patch | 12
-rw-r--r--  patches/rtmutex-annotate-sleeping-lock-context.patch | 2
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch | 12
-rw-r--r--  patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch | 2
-rw-r--r--  patches/sched-disable-rt-group-sched-on-rt.patch | 2
-rw-r--r--  patches/sched-fair-Make-the-hrtimers-non-hard-again.patch | 27
-rw-r--r--  patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch | 141
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch | 2
-rw-r--r--  patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 10
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 8
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 6
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r--  patches/series | 4
-rw-r--r--  patches/slub-disable-SLUB_CPU_PARTIAL.patch | 2
-rw-r--r--  patches/timers-prepare-for-full-preemption.patch | 4
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch | 12
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 4
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
41 files changed, 341 insertions(+), 130 deletions(-)
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index 8aa2caf6a1b1..144e7c0a8bd5 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -79,7 +79,7 @@ Subject: kernel/sched/core: add migrate_disable()
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1007,7 +1007,15 @@ void set_cpus_allowed_common(struct task
+@@ -1006,7 +1006,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable()
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1036,6 +1044,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1035,6 +1043,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable()
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1094,9 +1116,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1093,9 +1115,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7059,3 +7088,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7058,3 +7087,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index d186cea64e60..73bf7cf146d6 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7108,7 +7108,10 @@ void migrate_disable(void)
+@@ -7107,7 +7107,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7138,7 +7141,10 @@ void migrate_enable(void)
+@@ -7137,7 +7140,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index 443e3ec835f4..948376a9e029 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/kmap_types.h>
#include <linux/uaccess.h>
-@@ -120,6 +121,7 @@ struct kioctx {
+@@ -121,6 +122,7 @@ struct kioctx {
long nr_pages;
struct rcu_work free_rwork; /* see free_ioctx() */
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* signals when all in-flight requests are done
-@@ -254,6 +256,7 @@ static int __init aio_setup(void)
+@@ -255,6 +257,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -595,9 +598,9 @@ static void free_ioctx_reqs(struct percp
+@@ -596,9 +599,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -615,6 +618,14 @@ static void free_ioctx_users(struct perc
+@@ -616,6 +619,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index b9515da52876..41879c4d5c38 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -678,7 +678,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -677,7 +677,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
diff --git a/patches/futex-workaround-migrate_disable-enable-in-different.patch b/patches/futex-workaround-migrate_disable-enable-in-different.patch
index 2bc6412a747a..317df4e52e6f 100644
--- a/patches/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/patches/futex-workaround-migrate_disable-enable-in-different.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2798,9 +2798,18 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2855,9 +2855,18 @@ static int futex_lock_pi(u32 __user *uad
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret) {
if (ret == 1)
-@@ -2947,11 +2956,21 @@ static int futex_unlock_pi(u32 __user *u
+@@ -3004,11 +3013,21 @@ static int futex_unlock_pi(u32 __user *u
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 0a7b6988255f..bf5a3a6b7aab 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7203,6 +7203,7 @@ void migrate_disable(void)
+@@ -7202,6 +7202,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7268,12 +7269,15 @@ void migrate_enable(void)
+@@ -7267,12 +7268,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index b25d26fc0cb8..a883ae776797 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq
+@@ -314,7 +314,7 @@ static void hrtick_rq_init(struct rq *rq
rq->hrtick_csd.info = rq;
#endif
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4878,9 +4878,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4880,9 +4880,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 5e50d43e2f0e..4aa3e759d010 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -116,7 +116,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
current->timer_slack_ns, \
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2624,10 +2624,9 @@ static int futex_wait(u32 __user *uaddr,
+@@ -2681,10 +2681,9 @@ static int futex_wait(u32 __user *uaddr,
if (abs_time) {
to = &timeout;
@@ -130,7 +130,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
-@@ -2726,9 +2725,8 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2783,9 +2782,8 @@ static int futex_lock_pi(u32 __user *uad
if (time) {
to = &timeout;
@@ -142,7 +142,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires(&to->timer, *time);
}
-@@ -3144,10 +3142,9 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3201,10 +3199,9 @@ static int futex_wait_requeue_pi(u32 __u
if (abs_time) {
to = &timeout;
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index e9838c7bb602..7bf4bd2955fc 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
expires = timeval_to_ktime(value->it_value);
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
-@@ -466,7 +466,7 @@ static struct k_itimer * alloc_posix_tim
+@@ -463,7 +463,7 @@ static struct k_itimer * alloc_posix_tim
static void k_itimer_rcu_free(struct rcu_head *head)
{
@@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kmem_cache_free(posix_timers_cache, tmr);
}
-@@ -483,7 +483,7 @@ static void release_posix_timer(struct k
+@@ -480,7 +480,7 @@ static void release_posix_timer(struct k
}
put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
@@ -191,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int common_timer_create(struct k_itimer *new_timer)
-@@ -824,6 +824,22 @@ static void common_hrtimer_arm(struct k_
+@@ -821,6 +821,22 @@ static void common_hrtimer_arm(struct k_
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -214,7 +214,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
return hrtimer_try_to_cancel(&timr->it.real.timer);
-@@ -888,6 +904,7 @@ static int do_timer_settime(timer_t time
+@@ -885,6 +901,7 @@ static int do_timer_settime(timer_t time
if (!timr)
return -EINVAL;
@@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -896,9 +913,12 @@ static int do_timer_settime(timer_t time
+@@ -893,9 +910,12 @@ static int do_timer_settime(timer_t time
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -235,7 +235,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return error;
}
-@@ -980,10 +1000,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+@@ -977,10 +997,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
if (!timer)
return -EINVAL;
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
-@@ -1009,8 +1034,18 @@ static void itimer_delete(struct k_itime
+@@ -1006,8 +1031,18 @@ static void itimer_delete(struct k_itime
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index b409f1ed624a..656bc66d1dc0 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* One for us, one for whoever does the "release_task()" (usually
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -877,7 +877,7 @@ static inline bool is_per_cpu_kthread(st
+@@ -876,7 +876,7 @@ static inline bool is_per_cpu_kthread(st
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
if (is_per_cpu_kthread(p))
-@@ -972,7 +972,7 @@ static int migration_cpu_stop(void *data
+@@ -971,7 +971,7 @@ static int migration_cpu_stop(void *data
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
@@ -322,7 +322,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1003,7 +1003,7 @@ static int migration_cpu_stop(void *data
+@@ -1002,7 +1002,7 @@ static int migration_cpu_stop(void *data
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -331,7 +331,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-@@ -1073,7 +1073,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1072,7 +1072,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
@@ -340,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1236,10 +1236,10 @@ static int migrate_swap_stop(void *data)
+@@ -1235,10 +1235,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -353,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1281,10 +1281,10 @@ int migrate_swap(struct task_struct *cur
+@@ -1280,10 +1280,10 @@ int migrate_swap(struct task_struct *cur
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -366,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1429,7 +1429,7 @@ void kick_process(struct task_struct *p)
+@@ -1428,7 +1428,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -375,7 +375,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1469,14 +1469,14 @@ static int select_fallback_rq(int cpu, s
+@@ -1468,14 +1468,14 @@ static int select_fallback_rq(int cpu, s
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!is_cpu_allowed(p, dest_cpu))
continue;
-@@ -1520,7 +1520,7 @@ static int select_fallback_rq(int cpu, s
+@@ -1519,7 +1519,7 @@ static int select_fallback_rq(int cpu, s
}
/*
@@ -401,7 +401,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1530,11 +1530,11 @@ int select_task_rq(struct task_struct *p
+@@ -1529,11 +1529,11 @@ int select_task_rq(struct task_struct *p
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -2401,7 +2401,7 @@ void wake_up_new_task(struct task_struct
+@@ -2400,7 +2400,7 @@ void wake_up_new_task(struct task_struct
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -424,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4274,7 +4274,7 @@ static int __sched_setscheduler(struct t
+@@ -4273,7 +4273,7 @@ static int __sched_setscheduler(struct t
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -433,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4873,7 +4873,7 @@ long sched_getaffinity(pid_t pid, struct
+@@ -4872,7 +4872,7 @@ long sched_getaffinity(pid_t pid, struct
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -442,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5453,7 +5453,7 @@ int task_can_attach(struct task_struct *
+@@ -5452,7 +5452,7 @@ int task_can_attach(struct task_struct *
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -451,7 +451,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5480,7 +5480,7 @@ int migrate_task_to(struct task_struct *
+@@ -5479,7 +5479,7 @@ int migrate_task_to(struct task_struct *
if (curr_cpu == target_cpu)
return 0;
@@ -460,7 +460,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5618,7 +5618,7 @@ static void migrate_tasks(struct rq *dea
+@@ -5617,7 +5617,7 @@ static void migrate_tasks(struct rq *dea
put_prev_task(rq, next);
/*
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
env->dst_cpu = cpu;
-@@ -5711,7 +5711,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5713,7 +5713,7 @@ find_idlest_group(struct sched_domain *s
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@@ -562,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5843,7 +5843,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -5845,7 +5845,7 @@ find_idlest_group_cpu(struct sched_group
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@@ -571,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5883,7 +5883,7 @@ static inline int find_idlest_cpu(struct
+@@ -5885,7 +5885,7 @@ static inline int find_idlest_cpu(struct
{
int new_cpu = cpu;
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prev_cpu;
/*
-@@ -5999,7 +5999,7 @@ static int select_idle_core(struct task_
+@@ -6001,7 +6001,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -6033,7 +6033,7 @@ static int select_idle_smt(struct task_s
+@@ -6035,7 +6035,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
return cpu;
-@@ -6096,7 +6096,7 @@ static int select_idle_cpu(struct task_s
+@@ -6098,7 +6098,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
break;
-@@ -6133,7 +6133,7 @@ static int select_idle_sibling(struct ta
+@@ -6135,7 +6135,7 @@ static int select_idle_sibling(struct ta
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6351,7 +6351,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6353,7 +6353,7 @@ select_task_rq_fair(struct task_struct *
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7090,14 +7090,14 @@ int can_migrate_task(struct task_struct
+@@ -7092,14 +7092,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -642,7 +642,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7117,7 +7117,7 @@ int can_migrate_task(struct task_struct
+@@ -7119,7 +7119,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7738,7 +7738,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7740,7 +7740,7 @@ check_cpu_capacity(struct rq *rq, struct
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8353,7 +8353,7 @@ static struct sched_group *find_busiest_
+@@ -8355,7 +8355,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8749,7 +8749,7 @@ static int load_balance(int this_cpu, st
+@@ -8751,7 +8751,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 7fa3033be146..65db86c3bf52 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2740,15 +2740,6 @@ static struct rq *finish_task_switch(str
+@@ -2739,15 +2739,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 02952cda4bfa..e16fb07c0a7d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt9
++-rt10
diff --git a/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
new file mode 100644
index 000000000000..6fe10914cc99
--- /dev/null
+++ b/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
@@ -0,0 +1,39 @@
+From: Scott Wood <swood@redhat.com>
+Date: Fri, 4 Jan 2019 15:33:21 -0500
+Subject: [PATCH] locking/rt-mutex: Flush block plug on __down_read()
+
+__down_read() bypasses the rtmutex frontend to call
+rt_mutex_slowlock_locked() directly, and thus it needs to call
+blk_schedule_flush_plug() itself.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rwsem-rt.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/locking/rwsem-rt.c
++++ b/kernel/locking/rwsem-rt.c
+@@ -1,5 +1,6 @@
+ /*
+ */
++#include <linux/blkdev.h>
+ #include <linux/rwsem.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/signal.h>
+@@ -87,6 +88,14 @@ static int __sched __down_read_common(st
+
+ if (__down_read_trylock(sem))
+ return 0;
++ /*
++ * If rt_mutex blocks, the function sched_submit_work will not call
++ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
++ * We must call blk_schedule_flush_plug here, if we don't call it,
++ * a deadlock in I/O may happen.
++ */
++ if (unlikely(blk_needs_flush_plug(current)))
++ blk_schedule_flush_plug(current);
+
+ might_sleep();
+ raw_spin_lock_irq(&m->wait_lock);
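For context on the deadlock the patch above fixes: when a task blocks on an rtmutex, sched_submit_work() bails out early and never reaches the block-plug flush, so any sleep path that bypasses the schedule() frontend must flush by hand. A minimal sketch of that gate, paraphrased from kernel/sched/core.c of this kernel generation (not a verbatim quote):

	static inline void sched_submit_work(struct task_struct *tsk)
	{
		/* Not going to sleep, or blocked on a PI mutex: skip the flush. */
		if (!tsk->state || tsk_is_pi_blocked(tsk))
			return;

		/*
		 * About to sleep with plugged I/O queued: submit it now, or the
		 * task may wait forever on I/O still sitting in its own plug.
		 */
		if (blk_needs_flush_plug(tsk))
			blk_schedule_flush_plug(tsk);
	}

Since rt_mutex_slowlock_locked() is called with the rtmutex frontend bypassed, __down_read() performs the equivalent flush itself before taking the wait_lock, as shown in the hunk above.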
diff --git a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
index 2148ff6b5c46..69e54df77e02 100644
--- a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
+++ b/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * If rt_mutex blocks, the function sched_submit_work will not call
+ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
+ * We must call blk_schedule_flush_plug here, if we don't call it,
-+ * a deadlock in device mapper may happen.
++ * a deadlock in I/O may happen.
+ */
+ if (unlikely(blk_needs_flush_plug(current)))
+ blk_schedule_flush_plug(current);
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 7c29e8d42337..a5d3f5929da6 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7184,8 +7184,9 @@ void __init free_area_init(unsigned long
+@@ -7196,8 +7196,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index b0df4353043b..41dc6c2737e2 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1628,6 +1628,7 @@ choice
+@@ -1633,6 +1633,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1648,6 +1649,7 @@ config SLUB
+@@ -1653,6 +1654,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 977d0320376f..91bf513c25f3 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8070,7 +8093,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8085,7 +8108,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8079,7 +8102,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8094,7 +8117,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index e786037ea349..51aa8c6c304b 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -475,9 +475,11 @@ static u64 oops_id;
+@@ -479,9 +479,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 8d40c9e02220..9a02f09f143e 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -250,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto again;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -492,6 +492,48 @@ void resched_curr(struct rq *rq)
+@@ -491,6 +491,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2404,6 +2446,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2403,6 +2445,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3471,6 +3516,7 @@ static void __sched notrace __schedule(b
+@@ -3470,6 +3515,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -317,7 +317,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3651,6 +3697,30 @@ static void __sched notrace preempt_sche
+@@ -3650,6 +3696,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -348,7 +348,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3665,7 +3735,8 @@ asmlinkage __visible void __sched notrac
+@@ -3664,7 +3734,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -358,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3692,6 +3763,9 @@ asmlinkage __visible void __sched notrac
+@@ -3691,6 +3762,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -368,7 +368,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5460,7 +5534,9 @@ void init_idle(struct task_struct *idle,
+@@ -5459,7 +5533,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -379,7 +379,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7182,6 +7258,7 @@ void migrate_disable(void)
+@@ -7181,6 +7257,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -387,7 +387,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7249,6 +7326,7 @@ void migrate_enable(void)
+@@ -7248,6 +7325,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -395,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7257,6 +7335,7 @@ void migrate_enable(void)
+@@ -7256,6 +7334,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
@@ -441,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5037,7 +5037,7 @@ static void hrtick_start_fair(struct rq
+@@ -5039,7 +5039,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6612,7 +6612,7 @@ static void check_preempt_wakeup(struct
+@@ -6614,7 +6614,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -9723,7 +9723,7 @@ static void task_fork_fair(struct task_s
+@@ -9725,7 +9725,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9747,7 +9747,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9749,7 +9749,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -491,7 +491,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1639,6 +1639,15 @@ extern void reweight_task(struct task_st
+@@ -1638,6 +1638,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 49e33c54e4d9..6a81b2669c16 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1348,6 +1348,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1347,6 +1347,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1392,7 +1404,7 @@ unsigned long wait_task_inactive(struct
+@@ -1391,7 +1403,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1407,7 +1419,8 @@ unsigned long wait_task_inactive(struct
+@@ -1406,7 +1418,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index c498574b87da..68badf0da2d1 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int hv_ce_set_next_event(unsigned long delta,
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
-@@ -991,6 +991,8 @@ static void vmbus_isr(void)
+@@ -1011,6 +1011,8 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool handled = false;
if (unlikely(page_addr == NULL))
-@@ -1034,7 +1036,7 @@ static void vmbus_isr(void)
+@@ -1054,7 +1056,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index 68770bc080c9..9bae25ddb90f 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7139,6 +7139,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7138,6 +7138,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7162,10 +7203,9 @@ void migrate_disable(void)
+@@ -7161,10 +7202,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7197,9 +7237,8 @@ void migrate_enable(void)
+@@ -7196,9 +7236,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index 06083df1b175..b2930643f896 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -391,7 +391,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1414,6 +1414,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1471,6 +1471,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -399,7 +399,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1475,13 +1476,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1532,13 +1533,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -416,7 +416,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2793,7 +2794,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2850,7 +2851,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -425,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3165,7 +3166,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3222,7 +3223,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -1121,7 +1121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# include "rtmutex-debug.h"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct tas
+@@ -400,9 +400,15 @@ static bool set_nr_if_polling(struct tas
#endif
#endif
@@ -1139,7 +1139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -425,24 +431,32 @@ void wake_q_add(struct wake_q_head *head
+@@ -424,24 +430,32 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index 9c96bc56e8a6..fb25f58a75eb 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7308,4 +7308,49 @@ void migrate_enable(void)
+@@ -7307,4 +7307,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 23479b660bb5..9dd192089049 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2086,6 +2086,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2143,6 +2143,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -3126,7 +3136,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3183,7 +3193,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3184,20 +3194,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3241,20 +3251,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3206,7 +3251,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3263,7 +3308,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3217,7 +3263,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3274,7 +3320,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3231,7 +3277,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3288,7 +3334,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
diff --git a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
index cc06ce835c6c..4d22d73725ff 100644
--- a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
+++ b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -903,7 +903,7 @@ static inline bool is_cpu_allowed(struct
+@@ -902,7 +902,7 @@ static inline bool is_cpu_allowed(struct
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index c4e678fbc3d7..b752b8171d2f 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -776,6 +776,7 @@ config CFS_BANDWIDTH
+@@ -781,6 +781,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
new file mode 100644
index 000000000000..db0c7613e11b
--- /dev/null
+++ b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 8 Jan 2019 12:31:06 +0100
+Subject: [PATCH] sched/fair: Make the hrtimers non-hard again
+
+Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both
+hrtimers can run in softirq context, because interrupts are now disabled
+as part of the locking procedure.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/fair.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4880,9 +4880,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+ cfs_b->period = ns_to_ktime(default_cfs_period());
+
+ INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
+- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
++ hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ cfs_b->period_timer.function = sched_cfs_period_timer;
+- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cfs_b->slack_timer.function = sched_cfs_slack_timer;
+ cfs_b->distribute_running = 0;
+ }
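As background to the _HARD suffix the patch above drops: on RT, an hrtimer initialized without it is expired from the hrtimer softirq, where the handler may take sleeping locks; a _HARD timer fires in hard interrupt context and its handler must not block. A hedged illustration (the timer names and init function are made up; the mode constants are the ones used in this tree):

	#include <linux/hrtimer.h>

	static struct hrtimer soft_timer;	/* expires from the hrtimer softirq on RT */
	static struct hrtimer hard_timer;	/* expires in hard IRQ context, even on RT */

	static void demo_timers_init(void)
	{
		hrtimer_init(&soft_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_init(&hard_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	}

With the CFS-bandwidth locking made IRQ-safe by the following patch, the softirq variant suffices, which is why the _HARD annotations can be dropped again.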
diff --git a/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
new file mode 100644
index 000000000000..a4fa35304e84
--- /dev/null
+++ b/patches/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -0,0 +1,141 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 7 Jan 2019 13:52:31 +0100
+Subject: [PATCH] sched/fair: Robustify CFS-bandwidth timer locking
+
+Traditionally hrtimer callbacks were run with IRQs disabled, but with
+the introduction of HRTIMER_MODE_SOFT it is possible they run from
+SoftIRQ context, which does _NOT_ have IRQs disabled.
+
+Allow for the CFS bandwidth timers (period_timer and slack_timer) to
+be run from SoftIRQ context; this entails removing the assumption that
+IRQs are already disabled from the locking.
+
+While mainline doesn't strictly need this, -RT forces all timers not
+explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
+And marking these timers as MODE_HARD doesn't make sense as they're
+not required for RT operation and can potentially be quite expensive.
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Tom Putzeys <tom.putzeys@be.atlascopco.com>
+Tested-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/fair.c | 30 ++++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4554,7 +4554,7 @@ static u64 distribute_cfs_runtime(struct
+ struct rq *rq = rq_of(cfs_rq);
+ struct rq_flags rf;
+
+- rq_lock(rq, &rf);
++ rq_lock_irqsave(rq, &rf);
+ if (!cfs_rq_throttled(cfs_rq))
+ goto next;
+
+@@ -4571,7 +4571,7 @@ static u64 distribute_cfs_runtime(struct
+ unthrottle_cfs_rq(cfs_rq);
+
+ next:
+- rq_unlock(rq, &rf);
++ rq_unlock_irqrestore(rq, &rf);
+
+ if (!remaining)
+ break;
+@@ -4587,7 +4587,7 @@ static u64 distribute_cfs_runtime(struct
+ * period the timer is deactivated until scheduling resumes; cfs_b->idle is
+ * used to track this state.
+ */
+-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
++static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
+ {
+ u64 runtime, runtime_expires;
+ int throttled;
+@@ -4629,11 +4629,11 @@ static int do_sched_cfs_period_timer(str
+ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
+ runtime = cfs_b->runtime;
+ cfs_b->distribute_running = 1;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ /* we can't nest cfs_b->lock while distributing bandwidth */
+ runtime = distribute_cfs_runtime(cfs_b, runtime,
+ runtime_expires);
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+
+ cfs_b->distribute_running = 0;
+ throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+@@ -4742,17 +4742,18 @@ static __always_inline void return_cfs_r
+ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ {
+ u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
++ unsigned long flags;
+ u64 expires;
+
+ /* confirm we're still not at a refresh boundary */
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ if (cfs_b->distribute_running) {
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ return;
+ }
+
+ if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ return;
+ }
+
+@@ -4763,18 +4764,18 @@ static void do_sched_cfs_slack_timer(str
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+
+ if (!runtime)
+ return;
+
+ runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
+
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ if (expires == cfs_b->runtime_expires)
+ cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+ }
+
+ /*
+@@ -4852,20 +4853,21 @@ static enum hrtimer_restart sched_cfs_pe
+ {
+ struct cfs_bandwidth *cfs_b =
+ container_of(timer, struct cfs_bandwidth, period_timer);
++ unsigned long flags;
+ int overrun;
+ int idle = 0;
+
+- raw_spin_lock(&cfs_b->lock);
++ raw_spin_lock_irqsave(&cfs_b->lock, flags);
+ for (;;) {
+ overrun = hrtimer_forward_now(timer, cfs_b->period);
+ if (!overrun)
+ break;
+
+- idle = do_sched_cfs_period_timer(cfs_b, overrun);
++ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
+ }
+ if (idle)
+ cfs_b->period_active = 0;
+- raw_spin_unlock(&cfs_b->lock);
++ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
+
+ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+ }
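The mechanical pattern applied throughout the hunks above: a raw spinlock that was taken with plain raw_spin_lock() on the assumption that IRQs were already off must switch to the irqsave variants once its callers can also run in softirq context with IRQs enabled. A minimal sketch of the idiom (demo_lock and demo_callback are hypothetical names, not from the patch):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	/*
	 * May be called from hard-IRQ context (IRQs off) or softirq context
	 * (IRQs on): save and restore the IRQ state instead of assuming it.
	 */
	static void demo_callback(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section, also reachable from hard IRQ ... */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}

do_sched_cfs_period_timer() additionally threads the saved flags through as a parameter so it can drop and retake cfs_b->lock with the caller's IRQ state intact while distributing runtime.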
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 7398ab2a60e8..48a9957febea 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6155,7 +6155,7 @@ void __init sched_init(void)
+@@ -6154,7 +6154,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
index f61bc0f6120c..274c8a0af112 100644
--- a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
+++ b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1012,6 +1012,7 @@ int __migrate_disabled(struct task_struc
+@@ -1011,6 +1011,7 @@ int __migrate_disabled(struct task_struc
{
return p->migrate_disable;
}
diff --git a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
index ae024f27ca81..3b9f2326eaff 100644
--- a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
+++ b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1030,7 +1030,7 @@ void set_cpus_allowed_common(struct task
+@@ -1029,7 +1029,7 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
-@@ -1070,7 +1070,7 @@ static void __do_set_cpus_allowed_tail(s
+@@ -1069,7 +1069,7 @@ static void __do_set_cpus_allowed_tail(s
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
-@@ -1143,7 +1143,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1142,7 +1142,7 @@ static int __set_cpus_allowed_ptr(struct
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
-@@ -7164,7 +7164,7 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7163,7 +7163,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
update_nr_migratory(struct task_struct *p, long delta)
-@@ -7312,45 +7312,44 @@ EXPORT_SYMBOL(migrate_enable);
+@@ -7311,45 +7311,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index d4b2df873e4f..ce90eb91a5fa 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2728,9 +2728,13 @@ static struct rq *finish_task_switch(str
+@@ -2727,9 +2727,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -5558,6 +5562,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5557,6 +5561,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5573,7 +5579,11 @@ void idle_task_exit(void)
+@@ -5572,7 +5578,11 @@ void idle_task_exit(void)
current->active_mm = &init_mm;
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5885,6 +5895,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5884,6 +5894,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 5117425814c5..0127fdc752d8 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1998,8 +1998,27 @@ try_to_wake_up(struct task_struct *p, un
+@@ -1997,8 +1997,27 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2163,6 +2182,18 @@ int wake_up_process(struct task_struct *
+@@ -2162,6 +2181,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1444,6 +1444,7 @@ static inline int task_on_rq_migrating(s
+@@ -1443,6 +1443,7 @@ static inline int task_on_rq_migrating(s
#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* Child wakeup after fork */
#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
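
[Editor's note: sched-rt-mutex-wakeup.patch adds the saved_state handling whose context shows above. A task that blocks on an RT sleeping spinlock while already sleeping in, say, TASK_INTERRUPTIBLE must not lose a regular wakeup arriving during the lock sleep. Here is a toy, compilable model of that bookkeeping; the state values and the block/unblock helpers are invented for the example, while the real logic sits in try_to_wake_up() under pi_lock.]

#include <assert.h>

#define TASK_RUNNING            0x0
#define TASK_INTERRUPTIBLE      0x1
#define TASK_UNINTERRUPTIBLE    0x2

struct task {
        int state;                 /* what wakeups test first */
        int saved_state;           /* parked while on a sleeping lock */
};

/* Regular wakeup: if the task is off sleeping on an RT lock, record
 * the wakeup in saved_state so it is replayed later. */
static int try_to_wake_up(struct task *p, int wake_state)
{
        if (p->state & wake_state) {
                p->state = TASK_RUNNING;
                return 1;
        }
        if (p->saved_state & wake_state) {
                p->saved_state = TASK_RUNNING;
                return 1;          /* deferred, but not lost */
        }
        return 0;
}

static void block_on_sleeping_lock(struct task *p)
{
        p->saved_state = p->state; /* park the real sleep state */
        p->state = TASK_UNINTERRUPTIBLE;
}

static void unblock_from_sleeping_lock(struct task *p)
{
        p->state = p->saved_state; /* RUNNING if a wakeup arrived */
        p->saved_state = TASK_RUNNING;
}

int main(void)
{
        struct task p = { .state = TASK_INTERRUPTIBLE };

        block_on_sleeping_lock(&p);
        try_to_wake_up(&p, TASK_INTERRUPTIBLE);  /* e.g. a signal */
        unblock_from_sleeping_lock(&p);
        assert(p.state == TASK_RUNNING);   /* wakeup was not lost */
        return 0;
}
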
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index f53b321ce5f2..7c3a3860dc74 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3497,8 +3497,10 @@ static void __sched notrace __schedule(b
+@@ -3496,8 +3496,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/series b/patches/series
index 69646621fe83..44ec56de37d0 100644
--- a/patches/series
+++ b/patches/series
@@ -24,6 +24,7 @@ irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
kthread-convert-worker-lock-to-raw-spinlock.patch
pinctrl-bcm2835-Use-raw-spinlock-for-RT-compatibilit.patch
crypto-caam-qi-simplify-CGR-allocation-freeing.patch
+sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
############################################################
# POSTED
@@ -164,6 +165,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+# check in the next release if this is still required.
x86-mm-pat-disable-preemption-__split_large_page-aft.patch
# RADIX TREE
@@ -189,6 +191,7 @@ time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
hrtimers-prepare-full-preemption.patch
hrtimer-by-timers-by-default-into-the-softirq-context.patch
+sched-fair-Make-the-hrtimers-non-hard-again.patch
hrtimer-Move-schedule_work-call-to-helper-thread.patch
hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
@@ -252,6 +255,7 @@ rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
# Allow to enable RT-FULL after sleeping spinlocks are wired up
kconfig-preempt-rt-full.patch
locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
+locking-rt-mutex-Flush-block-plug-on-__down_read.patch
locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
rtmutex-annotate-sleeping-lock-context.patch
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 94e14bea5b42..60248ae03d5c 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1692,7 +1692,7 @@ config SLAB_FREELIST_HARDENED
+@@ -1697,7 +1697,7 @@ config SLAB_FREELIST_HARDENED
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index e8682c83ae64..f8a40a25bc6f 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -497,11 +497,14 @@ void resched_cpu(int cpu)
+@@ -496,11 +496,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -520,6 +523,8 @@ int get_nohz_timer_target(void)
+@@ -519,6 +522,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
rcu_read_unlock();
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index b976f3198b6e..1d916c0b93f6 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1703,10 +1703,6 @@ static inline void ttwu_activate(struct
+@@ -1702,10 +1702,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2143,56 +2139,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2142,56 +2138,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3519,21 +3465,6 @@ static void __sched notrace __schedule(b
+@@ -3518,21 +3464,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3593,6 +3524,20 @@ static inline void sched_submit_work(str
+@@ -3592,6 +3523,20 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3601,6 +3546,12 @@ static inline void sched_submit_work(str
+@@ -3600,6 +3545,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3611,6 +3562,7 @@ asmlinkage __visible void __sched schedu
+@@ -3610,6 +3561,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
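
[Editor's note: workqueue-distangle-from-rq-lock.patch moves the workqueue sleep/wakeup notification out of __schedule(), where the rq lock is held, into hooks around schedule(). The skeletal model below shows the resulting call shape; the post-hook is inferred from the one-line addition at the end of schedule(), and every body here is a stand-in, including the PF_WQ_WORKER value.]

#include <stdio.h>

#define PF_WQ_WORKER 0x20                  /* value illustrative only */

struct task {
        int flags;
        int state;                         /* nonzero: about to sleep */
};

static void wq_worker_sleeping(struct task *tsk)
{
        (void)tsk;
        printf("workqueue notified: worker sleeps, maybe wake a peer\n");
}

static void wq_worker_running(struct task *tsk)
{
        (void)tsk;
        printf("workqueue notified: worker runs again\n");
}

/* Core scheduler: runs under the rq lock, now free of wq hooks. */
static void __schedule(void)
{
}

/* Pre-hook, still preemptible: notify the workqueue and, in the real
 * code, flush plugged block IO to avoid deadlocks. */
static void sched_submit_work(struct task *tsk)
{
        if (!tsk->state)
                return;
        if (tsk->flags & PF_WQ_WORKER)
                wq_worker_sleeping(tsk);
}

static void schedule(struct task *tsk)
{
        sched_submit_work(tsk);
        __schedule();
        if (tsk->flags & PF_WQ_WORKER)     /* the added post-hook */
                wq_worker_running(tsk);
}

int main(void)
{
        struct task worker = { .flags = PF_WQ_WORKER, .state = 1 };

        schedule(&worker);
        return 0;
}
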
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 942f2b1d6dbf..478256a093e9 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3568,9 +3568,8 @@ void __noreturn do_task_dead(void)
+@@ -3567,9 +3567,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3584,6 +3583,9 @@ static inline void sched_submit_work(str
+@@ -3583,6 +3582,9 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 22fc8a75478d..5ec302edb421 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6690,6 +6690,13 @@ int kvm_arch_init(void *opaque)
+@@ -6692,6 +6692,13 @@ int kvm_arch_init(void *opaque)
goto out;
}