Diffstat (limited to 'patches/preempt-lazy-support.patch')
-rw-r--r--  patches/preempt-lazy-support.patch | 20
1 file changed, 10 insertions, 10 deletions
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index b98d1f27e646..60ac34e5fdca 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -174,7 +174,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1850,6 +1850,44 @@ static inline int test_tsk_need_resched(
+@@ -1872,6 +1872,44 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
 
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	preempt_enable();
 
  	trace_sched_migrate_enable_tp(p);
-@@ -3806,6 +3850,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3822,6 +3866,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -5065,6 +5112,7 @@ static void __sched notrace __schedule(b
+@@ -5081,6 +5128,7 @@ static void __sched notrace __schedule(b
 
  	next = pick_next_task(rq, prev, &rf);
  	clear_tsk_need_resched(prev);
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	clear_preempt_need_resched();
 
  	if (likely(prev != next)) {
-@@ -5264,6 +5312,30 @@ static void __sched notrace preempt_sche
+@@ -5280,6 +5328,30 @@ static void __sched notrace preempt_sche
  	} while (need_resched());
  }
 
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #ifdef CONFIG_PREEMPTION
  /*
   * This is the entry point to schedule() from in-kernel preemption
-@@ -5277,7 +5349,8 @@ asmlinkage __visible void __sched notrac
+@@ -5293,7 +5365,8 @@ asmlinkage __visible void __sched notrac
  	 */
  	if (likely(!preemptible()))
  		return;
@@ -390,7 +390,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -5317,6 +5390,9 @@ asmlinkage __visible void __sched notrac
+@@ -5333,6 +5406,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
 
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -7155,7 +7231,9 @@ void init_idle(struct task_struct *idle,
+@@ -7172,7 +7248,9 @@ void init_idle(struct task_struct *idle,
 
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
@@ -458,7 +458,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  		return;
  	}
  	hrtick_start(rq, delta);
-@@ -6959,7 +6959,7 @@ static void check_preempt_wakeup(struct
+@@ -6979,7 +6979,7 @@ static void check_preempt_wakeup(struct
  	return;
 
  preempt:
@@ -467,7 +467,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -10715,7 +10715,7 @@ static void task_fork_fair(struct task_s
+@@ -10736,7 +10736,7 @@ static void task_fork_fair(struct task_s
  	 * 'current' within the tree based on its new key value.
  	 */
  	swap(curr->vruntime, se->vruntime);
@@ -476,7 +476,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	}
 
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -10742,7 +10742,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10763,7 +10763,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
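Note that every changed line above is an inner @@ hunk header: the refresh only moves the line offsets recorded in the carried patch (10 headers replaced), while the patch's own additions and removals are untouched. For orientation, the first refreshed hunk sits in include/linux/sched.h directly after the test_tsk_need_resched() helper visible in the context. A minimal sketch of the lazy counterparts that the preempt-lazy series introduces at that point, assuming the usual CONFIG_PREEMPT_LAZY / TIF_NEED_RESCHED_LAZY naming from the PREEMPT_RT queue rather than quoting this patch verbatim:

/* Illustrative sketch only, not copied from preempt-lazy-support.patch. */
#ifdef CONFIG_PREEMPT_LAZY
static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
}

static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY));
}
#else
/* Without lazy preemption the lazy flag collapses to a no-op. */
static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
{
	return 0;
}
#endif

The offsets shift together because the tree this patch applies on top of changed around the functions named in the hunk headers (sched_fork(), __schedule(), preempt_schedule(), init_idle(), and the fair-class resched paths), not because the lazy-preemption logic itself did.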