-rw-r--r--  patches/0002-sched-Fix-balance_callback.patch | 18
-rw-r--r--  patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch | 16
-rw-r--r--  patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch | 10
-rw-r--r--  patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch | 12
-rw-r--r--  patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch | 8
-rw-r--r--  patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch | 8
-rw-r--r--  patches/0008-sched-Massage-set_cpus_allowed.patch | 8
-rw-r--r--  patches/0009-sched-Add-migrate_disable.patch | 4
-rw-r--r--  patches/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch | 4
-rw-r--r--  patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch | 6
-rw-r--r--  patches/0012-sched-rt-Use-cpumask_any-_distribute.patch | 4
-rw-r--r--  patches/0012-softirq-Add-RT-specific-softirq-accounting.patch | 2
-rw-r--r--  patches/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch | 4
-rw-r--r--  patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch | 10
-rw-r--r--  patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch | 10
-rw-r--r--  patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch | 4
-rw-r--r--  patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch | 12
-rw-r--r--  patches/0017-sched-Add-migrate_disable-tracepoints.patch | 2
-rw-r--r--  patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch | 20
-rw-r--r--  patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch | 10
-rw-r--r--  patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch | 10
-rw-r--r--  patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch | 4
-rw-r--r--  patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 6
-rw-r--r--  patches/clk-imx8qxp-Unbreak-auto-module-building-for-MXC_CLK.patch | 75
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 2
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/lockdep-no-softirq-accounting-on-rt.patch | 4
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 4
-rw-r--r--  patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch | 22
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 12
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 4
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 20
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 8
-rw-r--r--  patches/sched-Fix-migration_cpu_stop-WARN.patch | 40
-rw-r--r--  patches/sched-core-Add-missing-completion-for-affine_move_ta.patch | 72
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 4
-rw-r--r--  patches/series | 7
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
42 files changed, 336 insertions(+), 142 deletions(-)
diff --git a/patches/0002-sched-Fix-balance_callback.patch b/patches/0002-sched-Fix-balance_callback.patch
index f248242c0770..efdaf21db268 100644
--- a/patches/0002-sched-Fix-balance_callback.patch
+++ b/patches/0002-sched-Fix-balance_callback.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3485,6 +3485,69 @@ static inline void finish_task(struct ta
+@@ -3490,6 +3490,69 @@ static inline void finish_task(struct ta
#endif
}
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
-@@ -3510,6 +3573,7 @@ static inline void finish_lock_switch(st
+@@ -3515,6 +3578,7 @@ static inline void finish_lock_switch(st
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&rq->lock);
}
-@@ -3651,43 +3715,6 @@ static struct rq *finish_task_switch(str
+@@ -3656,43 +3720,6 @@ static struct rq *finish_task_switch(str
return rq;
}
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
-@@ -3707,7 +3734,6 @@ asmlinkage __visible void schedule_tail(
+@@ -3712,7 +3739,6 @@ asmlinkage __visible void schedule_tail(
*/
rq = finish_task_switch(prev);
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
if (current->set_child_tid)
-@@ -4523,10 +4549,11 @@ static void __sched notrace __schedule(b
+@@ -4528,10 +4554,11 @@ static void __sched notrace __schedule(b
rq = context_switch(rq, prev, next, &rf);
} else {
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void __noreturn do_task_dead(void)
-@@ -4937,9 +4964,11 @@ void rt_mutex_setprio(struct task_struct
+@@ -4943,9 +4970,11 @@ void rt_mutex_setprio(struct task_struct
out_unlock:
/* Avoid rq from going away on us: */
preempt_disable();
@@ -187,7 +187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
#else
-@@ -5213,6 +5242,7 @@ static int __sched_setscheduler(struct t
+@@ -5219,6 +5248,7 @@ static int __sched_setscheduler(struct t
int retval, oldprio, oldpolicy = -1, queued, running;
int new_effective_prio, policy = attr->sched_policy;
const struct sched_class *prev_class;
@@ -195,7 +195,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rq_flags rf;
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-@@ -5451,6 +5481,7 @@ static int __sched_setscheduler(struct t
+@@ -5457,6 +5487,7 @@ static int __sched_setscheduler(struct t
/* Avoid rq from going away on us: */
preempt_disable();
@@ -203,7 +203,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
task_rq_unlock(rq, p, &rf);
if (pi) {
-@@ -5459,7 +5490,7 @@ static int __sched_setscheduler(struct t
+@@ -5465,7 +5496,7 @@ static int __sched_setscheduler(struct t
}
/* Run balance callbacks after we've adjusted the PI chain: */
diff --git a/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch b/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
index 5ae4e533e064..8dfee813f4f0 100644
--- a/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
+++ b/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3509,8 +3509,10 @@ static inline struct callback_head *spli
+@@ -3514,8 +3514,10 @@ static inline struct callback_head *spli
struct callback_head *head = rq->balance_callback;
lockdep_assert_held(&rq->lock);
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return head;
}
-@@ -3531,6 +3533,21 @@ static inline void balance_callbacks(str
+@@ -3536,6 +3538,21 @@ static inline void balance_callbacks(str
}
}
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline void __balance_callbacks(struct rq *rq)
-@@ -3546,6 +3563,10 @@ static inline void balance_callbacks(str
+@@ -3551,6 +3568,10 @@ static inline void balance_callbacks(str
{
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
static inline void
-@@ -3573,7 +3594,7 @@ static inline void finish_lock_switch(st
+@@ -3578,7 +3599,7 @@ static inline void finish_lock_switch(st
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&rq->lock);
}
-@@ -6832,6 +6853,90 @@ static void migrate_tasks(struct rq *dea
+@@ -6838,6 +6859,90 @@ static void migrate_tasks(struct rq *dea
rq->stop = stop;
}
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -6917,6 +7022,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6923,6 +7028,8 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
-@@ -6964,6 +7071,8 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6970,6 +7077,8 @@ int sched_cpu_deactivate(unsigned int cp
*/
synchronize_rcu();
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -6977,6 +7086,7 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6983,6 +7092,7 @@ int sched_cpu_deactivate(unsigned int cp
ret = cpuset_cpu_inactive(cpu);
if (ret) {
diff --git a/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch b/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
index 27c8eb6b73f7..5c1928da8242 100644
--- a/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
+++ b/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6895,8 +6895,21 @@ static void balance_push(struct rq *rq)
+@@ -6901,8 +6901,21 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
get_task_struct(push_task);
/*
-@@ -6927,6 +6940,20 @@ static void balance_push_set(int cpu, bo
+@@ -6933,6 +6946,20 @@ static void balance_push_set(int cpu, bo
rq_unlock_irqrestore(rq, &rf);
}
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline void balance_push(struct rq *rq)
-@@ -6937,6 +6964,10 @@ static inline void balance_push_set(int
+@@ -6943,6 +6970,10 @@ static inline void balance_push_set(int
{
}
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -7091,6 +7122,10 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7097,6 +7128,10 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7331,6 +7366,9 @@ void __init sched_init(void)
+@@ -7337,6 +7372,9 @@ void __init sched_init(void)
rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
#endif
diff --git a/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
index 0890fb016a09..73abd17ed651 100644
--- a/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
+++ b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2148,6 +2148,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2149,6 +2149,16 @@ static int futex_requeue(u32 __user *uad
*/
requeue_pi_wake_futex(this, &key2, hb2);
continue;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -3186,7 +3196,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3187,7 +3197,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3238,20 +3248,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3239,20 +3249,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3260,7 +3305,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3261,7 +3306,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3271,7 +3317,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3272,7 +3318,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3285,7 +3331,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3286,7 +3332,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
diff --git a/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch b/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
index 5d239be40873..28621da2f5e5 100644
--- a/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
+++ b/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.name = "smpboot/threads:online",
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6740,120 +6740,6 @@ void idle_task_exit(void)
+@@ -6746,120 +6746,6 @@ void idle_task_exit(void)
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __balance_push_cpu_stop(void *arg)
{
struct task_struct *p = arg;
-@@ -7122,10 +7008,6 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7128,10 +7014,6 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -217,7 +217,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7145,6 +7027,41 @@ int sched_cpu_starting(unsigned int cpu)
+@@ -7151,6 +7033,41 @@ int sched_cpu_starting(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -259,7 +259,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -7158,7 +7075,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7164,7 +7081,6 @@ int sched_cpu_dying(unsigned int cpu)
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
diff --git a/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch b/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
index caa6d17e4c0f..ea4419d2d12c 100644
--- a/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
+++ b/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6976,6 +6976,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6982,6 +6982,8 @@ int sched_cpu_activate(unsigned int cpu)
int sched_cpu_deactivate(unsigned int cpu)
{
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
set_cpu_active(cpu, false);
-@@ -6990,6 +6992,14 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6996,6 +6998,14 @@ int sched_cpu_deactivate(unsigned int cp
balance_push_set(cpu, true);
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -7071,10 +7081,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7077,10 +7087,6 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -543,7 +543,7 @@ static int push_dl_task(struct rq *rq);
+@@ -565,7 +565,7 @@ static int push_dl_task(struct rq *rq);
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
diff --git a/patches/0008-sched-Massage-set_cpus_allowed.patch b/patches/0008-sched-Massage-set_cpus_allowed.patch
index d960d19a7b00..9d33beb7f341 100644
--- a/patches/0008-sched-Massage-set_cpus_allowed.patch
+++ b/patches/0008-sched-Massage-set_cpus_allowed.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return set_cpus_allowed_ptr(p, new_mask);
}
-@@ -6006,7 +6014,7 @@ long sched_setaffinity(pid_t pid, const
+@@ -6012,7 +6020,7 @@ long sched_setaffinity(pid_t pid, const
}
#endif
again:
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
-@@ -6589,7 +6597,7 @@ void init_idle(struct task_struct *idle,
+@@ -6595,7 +6603,7 @@ void init_idle(struct task_struct *idle,
*
* And since this is boot we can forgo the serialization.
*/
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We're having a chicken and egg problem, even though we are
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2301,7 +2301,8 @@ static void task_woken_dl(struct rq *rq,
+@@ -2307,7 +2307,8 @@ static void task_woken_dl(struct rq *rq,
}
static void set_cpus_allowed_dl(struct task_struct *p,
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct root_domain *src_rd;
struct rq *rq;
-@@ -2330,7 +2331,7 @@ static void set_cpus_allowed_dl(struct t
+@@ -2336,7 +2337,7 @@ static void set_cpus_allowed_dl(struct t
raw_spin_unlock(&src_dl_b->lock);
}
diff --git a/patches/0009-sched-Add-migrate_disable.patch b/patches/0009-sched-Add-migrate_disable.patch
index 1b6ba254bc9a..98318d489643 100644
--- a/patches/0009-sched-Add-migrate_disable.patch
+++ b/patches/0009-sched-Add-migrate_disable.patch
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __LINUX_PREEMPT_H */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -714,6 +714,9 @@ struct task_struct {
+@@ -722,6 +722,9 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
@@ -296,7 +296,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
-@@ -4570,6 +4669,7 @@ static void __sched notrace __schedule(b
+@@ -4575,6 +4674,7 @@ static void __sched notrace __schedule(b
*/
++*switch_count;
diff --git a/patches/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch b/patches/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
index b460065d96e1..53631dfbb939 100644
--- a/patches/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
+++ b/patches/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -714,6 +714,7 @@ struct task_struct {
+@@ -722,6 +722,7 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
@@ -350,7 +350,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out:
task_rq_unlock(rq, p, &rf);
-@@ -3205,6 +3378,7 @@ static void __sched_fork(unsigned long c
+@@ -3210,6 +3383,7 @@ static void __sched_fork(unsigned long c
init_numa_balancing(clone_flags, p);
#ifdef CONFIG_SMP
p->wake_entry.u_flags = CSD_TYPE_TTWU;
diff --git a/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch b/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
index 7488fa181b09..62cdb031e3b2 100644
--- a/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
+++ b/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
static void
-@@ -7063,15 +7081,20 @@ static void balance_push(struct rq *rq)
+@@ -7069,15 +7087,20 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&rq->lock);
rcuwait_wake_up(&rq->hotplug_wait);
raw_spin_lock(&rq->lock);
-@@ -7118,7 +7141,8 @@ static void balance_hotplug_wait(void)
+@@ -7124,7 +7147,8 @@ static void balance_hotplug_wait(void)
{
struct rq *rq = this_rq();
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
TASK_UNINTERRUPTIBLE);
}
-@@ -7363,7 +7387,7 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7369,7 +7393,7 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
diff --git a/patches/0012-sched-rt-Use-cpumask_any-_distribute.patch b/patches/0012-sched-rt-Use-cpumask_any-_distribute.patch
index 9df553b579a4..043bf8e2f4eb 100644
--- a/patches/0012-sched-rt-Use-cpumask_any-_distribute.patch
+++ b/patches/0012-sched-rt-Use-cpumask_any-_distribute.patch
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* for_each_cpu - iterate over every cpu in a mask
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2002,8 +2002,8 @@ static int find_later_rq(struct task_str
+@@ -2008,8 +2008,8 @@ static int find_later_rq(struct task_str
return this_cpu;
}
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Last chance: if a CPU being in both later_mask
* and current sd span is valid, that becomes our
-@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_str
+@@ -2031,7 +2031,7 @@ static int find_later_rq(struct task_str
if (this_cpu != -1)
return this_cpu;
diff --git a/patches/0012-softirq-Add-RT-specific-softirq-accounting.patch b/patches/0012-softirq-Add-RT-specific-softirq-accounting.patch
index 6bd236429a13..419be259b505 100644
--- a/patches/0012-softirq-Add-RT-specific-softirq-accounting.patch
+++ b/patches/0012-softirq-Add-RT-specific-softirq-accounting.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1017,6 +1017,9 @@ struct task_struct {
+@@ -1039,6 +1039,9 @@ struct task_struct {
int softirq_context;
int irq_config;
#endif
diff --git a/patches/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch b/patches/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
index 4578a7191664..ba9d527dfbab 100644
--- a/patches/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
+++ b/patches/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to ensure that we have at least one bit
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -1912,7 +1912,7 @@ static void task_fork_dl(struct task_str
+@@ -1918,7 +1918,7 @@ static void task_fork_dl(struct task_str
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
return 0;
}
-@@ -2062,7 +2062,7 @@ static struct rq *find_lock_later_rq(str
+@@ -2068,7 +2068,7 @@ static struct rq *find_lock_later_rq(str
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
diff --git a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
index 0093030dc905..fa294e0f5b19 100644
--- a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+++ b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -647,6 +647,8 @@ struct task_struct {
+@@ -655,6 +655,8 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1750,6 +1752,7 @@ extern struct task_struct *find_get_task
+@@ -1772,6 +1774,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3268,7 +3268,7 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3284,7 +3284,7 @@ try_to_wake_up(struct task_struct *p, un
int cpu, success = 0;
preempt_disable();
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
* == smp_processor_id()'. Together this means we can special
-@@ -3298,8 +3298,26 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3314,8 +3314,26 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -3489,6 +3507,18 @@ int wake_up_process(struct task_struct *
+@@ -3505,6 +3523,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
index 3497f85a50a8..2a505cbbc320 100644
--- a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
-@@ -992,6 +998,7 @@ struct task_struct {
+@@ -1014,6 +1020,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
@@ -362,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1480,6 +1480,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1481,6 +1481,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -370,7 +370,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1541,13 +1542,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1542,13 +1543,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -387,7 +387,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2855,7 +2856,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2856,7 +2857,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -396,7 +396,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3217,7 +3218,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3218,7 +3219,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
diff --git a/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
index dc44856c7bd2..e4971e71cd17 100644
--- a/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-EXPORT_SYMBOL(kunmap_atomic_high);
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
-@@ -61,11 +61,6 @@
+@@ -62,11 +62,6 @@
unsigned long long memory_limit;
bool init_mem_is_free;
@@ -189,7 +189,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
-@@ -235,8 +230,6 @@ void __init paging_init(void)
+@@ -236,8 +231,6 @@ void __init paging_init(void)
map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
diff --git a/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch b/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
index 1715e93d04b0..46c5d606aa7c 100644
--- a/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
+++ b/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* The reason we have it anyway.
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -716,8 +716,9 @@ struct task_struct {
+@@ -724,8 +724,9 @@ struct task_struct {
cpumask_t cpus_mask;
void *migration_pending;
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2129,6 +2129,9 @@ static int push_dl_task(struct rq *rq)
+@@ -2135,6 +2135,9 @@ static int push_dl_task(struct rq *rq)
return 0;
retry:
@@ -216,7 +216,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (WARN_ON(next_task == rq->curr))
return 0;
-@@ -2206,7 +2209,7 @@ static void push_dl_tasks(struct rq *rq)
+@@ -2212,7 +2215,7 @@ static void push_dl_tasks(struct rq *rq)
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
@@ -225,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
-@@ -2236,6 +2239,7 @@ static void pull_dl_task(struct rq *this
+@@ -2242,6 +2245,7 @@ static void pull_dl_task(struct rq *this
continue;
/* Might drop this_rq->lock */
@@ -233,7 +233,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
double_lock_balance(this_rq, src_rq);
/*
-@@ -2267,17 +2271,27 @@ static void pull_dl_task(struct rq *this
+@@ -2273,17 +2277,27 @@ static void pull_dl_task(struct rq *this
src_rq->curr->dl.deadline))
goto skip;
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (resched)
-@@ -2524,6 +2538,7 @@ const struct sched_class dl_sched_class
+@@ -2530,6 +2544,7 @@ const struct sched_class dl_sched_class
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
diff --git a/patches/0017-sched-Add-migrate_disable-tracepoints.patch b/patches/0017-sched-Add-migrate_disable-tracepoints.patch
index 8f1bbe0978c3..3dd624e1a1e3 100644
--- a/patches/0017-sched-Add-migrate_disable-tracepoints.patch
+++ b/patches/0017-sched-Add-migrate_disable-tracepoints.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2272,6 +2272,7 @@ static void pull_dl_task(struct rq *this
+@@ -2278,6 +2278,7 @@ static void pull_dl_task(struct rq *this
goto skip;
if (is_migration_disabled(p)) {
diff --git a/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
index 9557a8b69006..e37962756af0 100644
--- a/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+++ b/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4965,7 +4965,7 @@ pick_next_task(struct rq *rq, struct tas
+@@ -4981,7 +4981,7 @@ pick_next_task(struct rq *rq, struct tas
*
* WARNING: must be called with preemption disabled!
*/
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *prev, *next;
unsigned long *switch_count;
-@@ -5018,7 +5018,7 @@ static void __sched notrace __schedule(b
+@@ -5034,7 +5034,7 @@ static void __sched notrace __schedule(b
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
prev_state = prev->state;
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING;
} else {
-@@ -5102,7 +5102,7 @@ void __noreturn do_task_dead(void)
+@@ -5118,7 +5118,7 @@ void __noreturn do_task_dead(void)
/* Tell freezer to ignore us: */
current->flags |= PF_NOFREEZE;
@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG();
/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-@@ -5135,9 +5135,6 @@ static inline void sched_submit_work(str
+@@ -5151,9 +5151,6 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -5163,7 +5160,7 @@ asmlinkage __visible void __sched schedu
+@@ -5179,7 +5176,7 @@ asmlinkage __visible void __sched schedu
sched_submit_work(tsk);
do {
preempt_disable();
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sched_preempt_enable_no_resched();
} while (need_resched());
sched_update_worker(tsk);
-@@ -5191,7 +5188,7 @@ void __sched schedule_idle(void)
+@@ -5207,7 +5204,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->state);
do {
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} while (need_resched());
}
-@@ -5244,7 +5241,7 @@ static void __sched notrace preempt_sche
+@@ -5260,7 +5257,7 @@ static void __sched notrace preempt_sche
*/
preempt_disable_notrace();
preempt_latency_start(1);
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
-@@ -5274,6 +5271,19 @@ asmlinkage __visible void __sched notrac
+@@ -5290,6 +5287,19 @@ asmlinkage __visible void __sched notrac
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
-@@ -5317,7 +5327,7 @@ asmlinkage __visible void __sched notrac
+@@ -5333,7 +5343,7 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -5346,7 +5356,7 @@ asmlinkage __visible void __sched preemp
+@@ -5362,7 +5372,7 @@ asmlinkage __visible void __sched preemp
do {
preempt_disable();
local_irq_enable();
diff --git a/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
index 49f7aa9248ff..c08ea25b65f8 100644
--- a/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
+++ b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __LINUX_PREEMPT_H */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -715,7 +715,7 @@ struct task_struct {
+@@ -723,7 +723,7 @@ struct task_struct {
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Per-CPU kthreads are allowed to run on !active && online CPUs, see
* __set_cpus_allowed_ptr() and select_fallback_rq().
-@@ -2843,7 +2839,7 @@ void sched_set_stop_task(int cpu, struct
+@@ -2854,7 +2850,7 @@ void sched_set_stop_task(int cpu, struct
}
}
@@ -182,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask,
-@@ -2852,10 +2848,6 @@ static inline int __set_cpus_allowed_ptr
+@@ -2863,10 +2859,6 @@ static inline int __set_cpus_allowed_ptr
return set_cpus_allowed_ptr(p, new_mask);
}
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
static inline bool rq_has_pinned_tasks(struct rq *rq)
-@@ -2863,7 +2855,7 @@ static inline bool rq_has_pinned_tasks(s
+@@ -2874,7 +2866,7 @@ static inline bool rq_has_pinned_tasks(s
return false;
}
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-@@ -7888,6 +7880,39 @@ void __cant_sleep(const char *file, int
+@@ -7905,6 +7897,39 @@ void __cant_sleep(const char *file, int
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);
diff --git a/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch b/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
index 494940343741..e1630dc4432d 100644
--- a/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
+++ b/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -629,6 +630,13 @@ struct wake_q_node {
+@@ -637,6 +638,13 @@ struct wake_q_node {
struct wake_q_node *next;
};
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
-@@ -1294,6 +1302,7 @@ struct task_struct {
+@@ -1316,6 +1324,7 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tsk->fail_nth = 0;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4055,6 +4055,22 @@ static inline void finish_lock_switch(st
+@@ -4071,6 +4071,22 @@ static inline void finish_lock_switch(st
# define finish_arch_post_lock_switch() do { } while (0)
#endif
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
-@@ -4077,6 +4093,7 @@ prepare_task_switch(struct rq *rq, struc
+@@ -4093,6 +4109,7 @@ prepare_task_switch(struct rq *rq, struc
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
fire_sched_out_preempt_notifiers(prev, next);
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
prepare_task(next);
prepare_arch_switch(next);
}
-@@ -4143,6 +4160,7 @@ static struct rq *finish_task_switch(str
+@@ -4159,6 +4176,7 @@ static struct rq *finish_task_switch(str
finish_lock_switch(rq);
finish_arch_post_lock_switch();
kcov_finish_switch(current);
diff --git a/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch b/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
index 4cdc4274aaba..37bc120c35ce 100644
--- a/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
+++ b/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -379,22 +379,15 @@ gtt_user_read(struct io_mapping *mapping
+@@ -355,22 +355,15 @@ gtt_user_read(struct io_mapping *mapping
char __user *user_data, int length)
{
void __iomem *vaddr;
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static int
-@@ -557,21 +550,14 @@ ggtt_write(struct io_mapping *mapping,
+@@ -539,21 +532,14 @@ ggtt_write(struct io_mapping *mapping,
char __user *user_data, int length)
{
void __iomem *vaddr;
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index cec5cca10408..364b8bbfbfc6 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
-@@ -685,7 +685,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -701,7 +701,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -734,7 +734,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -750,7 +750,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -806,7 +806,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -822,7 +822,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
diff --git a/patches/clk-imx8qxp-Unbreak-auto-module-building-for-MXC_CLK.patch b/patches/clk-imx8qxp-Unbreak-auto-module-building-for-MXC_CLK.patch
new file mode 100644
index 000000000000..770b0645b039
--- /dev/null
+++ b/patches/clk-imx8qxp-Unbreak-auto-module-building-for-MXC_CLK.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 24 Nov 2020 12:29:53 +0100
+Subject: [PATCH] clk: imx8qxp: Unbreak auto module building for MXC_CLK_SCU
+
+Automatic module building is broken by adding module support to the
+i.MX8QXP clock driver. It can be reproduced with the ARM defconfig plus
+CONFIG_IMX_MBOX=m and CONFIG_MXC_CLK_SCU=m.
+
+The compile breaks because the modules and source files are mixed.
+After fixing that, the build breaks because the SCU driver has no
+license, and symbols which are required by the CLK_IMX8QXP driver are
+not properly exported.
+
+Compile the module clk-imx-scu.o, which consists of clk-scu.o and
+clk-lpcg-scu.o, if CONFIG_MXC_CLK_SCU is enabled.
+Compile the modules clk-imx8qxp.o and clk-imx8qxp-lpcg.o if
+CONFIG_CLK_IMX8QXP is enabled.
+Add EXPORT_SYMBOL_GPL() to the functions which fail to resolve once
+CONFIG_CLK_IMX8QXP is built as a module.
+Add a GPL module license to clk-scu.c.
+
+Fixes: e0d0d4d86c766 ("clk: imx8qxp: Support building i.MX8QXP clock driver as module")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clk/imx/Makefile | 6 +++---
+ drivers/clk/imx/clk-lpcg-scu.c | 1 +
+ drivers/clk/imx/clk-scu.c | 4 ++++
+ 3 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/clk/imx/Makefile
++++ b/drivers/clk/imx/Makefile
+@@ -26,9 +26,9 @@ obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
+ obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
+ obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
+
+-obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o clk-imx-lpcg-scu.o
+-clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o
+-clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
++obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o
++clk-imx-scu-y := clk-scu.o clk-lpcg-scu.o
++obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
+
+ obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
+ obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
+--- a/drivers/clk/imx/clk-lpcg-scu.c
++++ b/drivers/clk/imx/clk-lpcg-scu.c
+@@ -115,3 +115,4 @@ struct clk_hw *imx_clk_lpcg_scu(const ch
+
+ return hw;
+ }
++EXPORT_SYMBOL_GPL(imx_clk_lpcg_scu);
+--- a/drivers/clk/imx/clk-scu.c
++++ b/drivers/clk/imx/clk-scu.c
+@@ -9,6 +9,7 @@
+ #include <linux/clk-provider.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/module.h>
+
+ #include "clk-scu.h"
+
+@@ -132,6 +133,7 @@ int imx_clk_scu_init(void)
+ {
+ return imx_scu_get_handle(&ccm_ipc_handle);
+ }
++EXPORT_SYMBOL_GPL(imx_clk_scu_init);
+
+ /*
+ * clk_scu_recalc_rate - Get clock rate for a SCU clock
+@@ -387,3 +389,5 @@ struct clk_hw *__imx_clk_scu(const char
+
+ return hw;
+ }
++EXPORT_SYMBOL_GPL(__imx_clk_scu);
++MODULE_LICENSE("GPL");
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 86b97d443e74..0c47636b0545 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *inode;
-@@ -1665,7 +1665,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1660,7 +1660,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode)
{
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index e38b189c7507..b677341b3eea 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
task_numa_free(tsk, true);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4244,15 +4244,6 @@ static struct rq *finish_task_switch(str
+@@ -4260,15 +4260,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 02952cda4bfa..e16fb07c0a7d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt9
++-rt10
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 325db985579c..2a9843aa018d 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void stop_critical_timings(void);
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -5286,6 +5286,7 @@ static void check_flags(unsigned long fl
+@@ -5287,6 +5287,7 @@ static void check_flags(unsigned long fl
}
}
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -5300,6 +5301,7 @@ static void check_flags(unsigned long fl
+@@ -5301,6 +5302,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 8287cee2611d..29e5b8a05c32 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2348,7 +2348,7 @@ static void drain_all_stock(struct mem_c
+@@ -2353,7 +2353,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2371,7 +2371,7 @@ static void drain_all_stock(struct mem_c
+@@ -2376,7 +2376,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
index b0c41d73998c..47a90c012d71 100644
--- a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+++ b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2201,6 +2201,7 @@ void unlock_page_memcg(struct page *page
+@@ -2206,6 +2206,7 @@ void unlock_page_memcg(struct page *page
EXPORT_SYMBOL(unlock_page_memcg);
struct memcg_stock_pcp {
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
-@@ -2252,7 +2253,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2257,7 +2258,7 @@ static bool consume_stock(struct mem_cgr
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -2260,7 +2261,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2265,7 +2266,7 @@ static bool consume_stock(struct mem_cgr
ret = true;
}
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2295,14 +2296,14 @@ static void drain_local_stock(struct wor
+@@ -2300,14 +2301,14 @@ static void drain_local_stock(struct wor
* The only protection from memory hotplug vs. drain_stock races is
* that we always operate on local CPU stock here with IRQ disabled
*/
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2314,7 +2315,7 @@ static void refill_stock(struct mem_cgro
+@@ -2319,7 +2320,7 @@ static void refill_stock(struct mem_cgro
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
-@@ -2327,7 +2328,7 @@ static void refill_stock(struct mem_cgro
+@@ -2332,7 +2333,7 @@ static void refill_stock(struct mem_cgro
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3139,7 +3140,7 @@ static bool consume_obj_stock(struct obj
+@@ -3144,7 +3145,7 @@ static bool consume_obj_stock(struct obj
unsigned long flags;
bool ret = false;
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
-@@ -3147,7 +3148,7 @@ static bool consume_obj_stock(struct obj
+@@ -3152,7 +3153,7 @@ static bool consume_obj_stock(struct obj
ret = true;
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3206,7 +3207,7 @@ static void refill_obj_stock(struct obj_
+@@ -3211,7 +3212,7 @@ static void refill_obj_stock(struct obj_
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
-@@ -3220,7 +3221,7 @@ static void refill_obj_stock(struct obj_
+@@ -3225,7 +3226,7 @@ static void refill_obj_stock(struct obj_
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7130,9 +7131,13 @@ static int __init mem_cgroup_init(void)
+@@ -7135,9 +7136,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 95177103ffbb..bff38083e8cb 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -5727,12 +5735,12 @@ static int mem_cgroup_move_account(struc
+@@ -5732,12 +5740,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6803,10 +6811,10 @@ int mem_cgroup_charge(struct page *page,
+@@ -6808,10 +6816,10 @@ int mem_cgroup_charge(struct page *page,
css_get(&memcg->css);
commit_charge(page, memcg);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6850,11 +6858,11 @@ static void uncharge_batch(const struct
+@@ -6855,11 +6863,11 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
-@@ -7008,10 +7016,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -7013,10 +7021,10 @@ void mem_cgroup_migrate(struct page *old
css_get(&memcg->css);
commit_charge(newpage, memcg);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -7186,6 +7194,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7191,6 +7199,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -7231,9 +7240,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7236,9 +7245,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index b1e835ba9bba..6b222ea636a0 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8792,7 +8801,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8797,7 +8806,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8801,7 +8810,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8806,7 +8815,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 7c47504aa624..9621ffe16ca3 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -972,6 +972,10 @@ struct task_struct {
+@@ -994,6 +994,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index b98d1f27e646..60ac34e5fdca 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -174,7 +174,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1850,6 +1850,44 @@ static inline int test_tsk_need_resched(
+@@ -1872,6 +1872,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
trace_sched_migrate_enable_tp(p);
-@@ -3806,6 +3850,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3822,6 +3866,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -5065,6 +5112,7 @@ static void __sched notrace __schedule(b
+@@ -5081,6 +5128,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -5264,6 +5312,30 @@ static void __sched notrace preempt_sche
+@@ -5280,6 +5328,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -5277,7 +5349,8 @@ asmlinkage __visible void __sched notrac
+@@ -5293,7 +5365,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -390,7 +390,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -5317,6 +5390,9 @@ asmlinkage __visible void __sched notrac
+@@ -5333,6 +5406,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -7155,7 +7231,9 @@ void init_idle(struct task_struct *idle,
+@@ -7172,7 +7248,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -458,7 +458,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6959,7 +6959,7 @@ static void check_preempt_wakeup(struct
+@@ -6979,7 +6979,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -467,7 +467,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10715,7 +10715,7 @@ static void task_fork_fair(struct task_s
+@@ -10736,7 +10736,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -476,7 +476,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10742,7 +10742,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10763,7 +10763,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 7fc725351459..31698e60110f 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
-@@ -1854,6 +1850,51 @@ static inline int test_tsk_need_resched(
+@@ -1876,6 +1872,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2562,6 +2562,18 @@ int migrate_swap(struct task_struct *cur
+@@ -2573,6 +2573,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -2606,7 +2618,7 @@ unsigned long wait_task_inactive(struct
+@@ -2617,7 +2629,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -2621,7 +2633,8 @@ unsigned long wait_task_inactive(struct
+@@ -2632,7 +2644,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/sched-Fix-migration_cpu_stop-WARN.patch b/patches/sched-Fix-migration_cpu_stop-WARN.patch
new file mode 100644
index 000000000000..7db213c74da6
--- /dev/null
+++ b/patches/sched-Fix-migration_cpu_stop-WARN.patch
@@ -0,0 +1,59 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 17 Nov 2020 12:14:51 +0100
+Subject: [PATCH] sched: Fix migration_cpu_stop() WARN
+
+Oleksandr reported hitting the WARN in the 'task_rq(p) != rq' branch
+of migration_cpu_stop(). Valentin noted that using cpu_of(rq) in that
+case is just plain wrong to begin with, since, per the earlier branch,
+that isn't the actual CPU of the task.
+
+Replace both instances of is_cpu_allowed() with a direct p->cpus_mask
+test using task_cpu().
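+
+As a rough sketch (paraphrased, not the exact helper in this tree), the
+point is that is_cpu_allowed() folds hotplug state into the check, which
+is not what this WARN wants to assert:
+
+	/* What the WARN now checks: pure affinity. */
+	cpumask_test_cpu(task_cpu(p), &p->cpus_mask);
+
+	/* is_cpu_allowed(p, cpu), approximately: */
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+	/* migrate_disable() must not perturb the affinity. */
+	if (is_migration_disabled(p))
+		return cpu_online(cpu);
+	/* Userspace tasks may not run on a !active CPU. */
+	if (!(p->flags & PF_KTHREAD))
+		return cpu_active(cpu);
+	/* Per-CPU kthread special cases elided. */
+	return cpu_online(cpu);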
+
+Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
+Debugged-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1915,7 +1915,7 @@ static int migration_cpu_stop(void *data
+ * and we should be valid again. Nothing to do.
+ */
+ if (!pending) {
+- WARN_ON_ONCE(!is_cpu_allowed(p, cpu_of(rq)));
++ WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
+ goto out;
+ }
+
+@@ -1943,7 +1943,7 @@ static int migration_cpu_stop(void *data
+ * valid again. Nothing to do.
+ */
+ if (!pending) {
+- WARN_ON_ONCE(!is_cpu_allowed(p, cpu_of(rq)));
++ WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
+ goto out;
+ }
+
diff --git a/patches/sched-core-Add-missing-completion-for-affine_move_ta.patch b/patches/sched-core-Add-missing-completion-for-affine_move_ta.patch
new file mode 100644
index 000000000000..b7e7ff6de118
--- /dev/null
+++ b/patches/sched-core-Add-missing-completion-for-affine_move_ta.patch
@@ -0,0 +1,85 @@
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Fri, 13 Nov 2020 11:24:14 +0000
+Subject: [PATCH] sched/core: Add missing completion for affine_move_task()
+ waiters
+
+Qian reported that a fuzzer issuing sched_setaffinity() ends up stuck on
+a wait_for_completion(). The problematic pattern seems to be:
+
+ affine_move_task()
+ // task_running() case
+ stop_one_cpu();
+ wait_for_completion(&pending->done);
+
+Combined with, on the stopper side:
+
+ migration_cpu_stop()
+ // Task moved between unlocks and scheduling the stopper
+ task_rq(p) != rq &&
+ // task_running() case
+ dest_cpu >= 0
+
+ => no complete_all()
+
+This can happen with both PREEMPT and !PREEMPT, although !PREEMPT should
+be more likely to see this, given that the targeted task has a much bigger
+window to block and be woken up elsewhere before the stopper runs.
+
+Make migration_cpu_stop() always look at pending affinity requests; signal
+their completion if the stopper hits a rq mismatch but the task is
+still within its allowed mask. When Migrate-Disable isn't involved, this
+matches the previous set_cpus_allowed_ptr() vs migration_cpu_stop()
+behaviour.
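+
+As an illustrative sketch (simplified, not the exact code), the pairing
+that could be missed in this window is:
+
+	/* Waiter side, affine_move_task(): */
+	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+	wait_for_completion(&pending->done);
+
+	/* Stopper side, migration_cpu_stop(), the new early-out: */
+	if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
+		p->migration_pending = NULL;
+		complete = true;	/* ends in complete_all(&pending->done) */
+	}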
+
+Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
+Reported-by: Qian Cai <cai@redhat.com>
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/lkml/8b62fd1ad1b18def27f18e2ee2df3ff5b36d0762.camel@redhat.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1927,7 +1927,7 @@ static int migration_cpu_stop(void *data
+ else
+ p->wake_cpu = dest_cpu;
+
+- } else if (dest_cpu < 0) {
++ } else if (dest_cpu < 0 || pending) {
+ /*
+ * This happens when we get migrated between migrate_enable()'s
+ * preempt_enable() and scheduling the stopper task. At that
+@@ -1938,6 +1938,17 @@ static int migration_cpu_stop(void *data
+ */
+
+ /*
++ * The task moved before the stopper got to run. We're holding
++ * ->pi_lock, so the allowed mask is stable - if it got
++ * somewhere allowed, we're done.
++ */
++ if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
++ p->migration_pending = NULL;
++ complete = true;
++ goto out;
++ }
++
++ /*
+ * When this was migrate_enable() but we no longer have an
+ * @pending, a concurrent SCA 'fixed' things and we should be
+ * valid again. Nothing to do.
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index b9d624063e80..69a8499b11f1 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7864,7 +7864,7 @@ void __init sched_init(void)
+@@ -7881,7 +7881,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index c0e3515d433d..46a0bf2fd08e 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4232,9 +4232,13 @@ static struct rq *finish_task_switch(str
+@@ -4248,9 +4248,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -7252,6 +7256,7 @@ void sched_setnuma(struct task_struct *p
+@@ -7269,6 +7273,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index b4463711602b..59669d50e9b7 100644
--- a/patches/series
+++ b/patches/series
@@ -32,6 +32,10 @@
0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
0019-sched-Comment-affine_move_task.patch
sched-Unlock-the-rq-in-affine_move_task-error-path.patch
+# tip 1293771e4353c148d5f6908fb32d1c1cfd653e47
+sched-Fix-migration_cpu_stop-WARN.patch
+# tip d707faa64d03d26b529cc4aea59dab1b016d4d3
+sched-core-Add-missing-completion-for-affine_move_ta.patch
# 2020-11-03 10:27 Thomas Gleixner [patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends
# 20201103092712.714480842@linutronix.de
@@ -99,6 +103,9 @@ timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch
0001-kthread-Move-prio-affinite-change-into-the-newly-cre.patch
0002-genirq-Move-prio-assignment-into-the-newly-created-t.patch
+# 20201124121740.ytag7rm53umi2qvm@linutronix.de
+clk-imx8qxp-Unbreak-auto-module-building-for-MXC_CLK.patch
+
############################################################
# Ready for posting
############################################################
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 9ce4f1bceb3c..1cceda460d77 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -963,6 +963,7 @@ struct task_struct {
+@@ -985,6 +985,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 38857d2f3481..0f7b158d2a09 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -7832,6 +7832,14 @@ int kvm_arch_init(void *opaque)
+@@ -7870,6 +7870,14 @@ int kvm_arch_init(void *opaque)
goto out;
}