Diffstat (limited to 'patches/preempt-lazy-support.patch')
-rw-r--r--  patches/preempt-lazy-support.patch | 134
1 file changed, 53 insertions(+), 81 deletions(-)
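This commit refreshes the lazy-preemption patch for a kernel where CONFIG_PREEMPT became CONFIG_PREEMPTION and PREEMPT_RT_FULL became PREEMPT_RT: the kernel/cpu.c hunk is dropped, and migrate_disable()/migrate_enable() now bracket the pinned region with preempt_lazy_disable()/preempt_lazy_enable(). The scheme the patch carries: a SCHED_OTHER wakeup only marks the running task with TIF_NEED_RESCHED_LAZY, and that mark is honoured at the next preemption point once the per-task lazy count drops back to zero, while TIF_NEED_RESCHED (as set for RT tasks) still preempts immediately. A minimal userspace sketch of that decision logic follows -- an illustration only, not kernel code; the flag and counter names mirror the patch, but the types and the main() driver are invented for the example:

/* Sketch of the lazy-preempt decision logic (assumed simplification). */
#include <stdbool.h>
#include <stdio.h>

static bool need_resched;       /* models TIF_NEED_RESCHED        */
static bool need_resched_lazy;  /* models TIF_NEED_RESCHED_LAZY   */
static int  preempt_lazy_count; /* models the per-task lazy depth */

/* Fair-class wakeups mark the task instead of forcing preemption. */
static void resched_curr_lazy(void)
{
        if (!need_resched && !need_resched_lazy)
                need_resched_lazy = true;
}

/*
 * Models the check the patch adds to preempt_schedule(): an eager
 * request always preempts; a lazy one is honoured only when the lazy
 * count is zero (e.g. after migrate_enable()).
 */
static bool should_preempt(void)
{
        if (need_resched)
                return true;
        return need_resched_lazy && preempt_lazy_count == 0;
}

int main(void)
{
        preempt_lazy_count++;   /* like preempt_lazy_disable() */
        resched_curr_lazy();    /* SCHED_OTHER wakeup          */
        printf("pinned:   preempt=%d\n", should_preempt()); /* 0 */
        preempt_lazy_count--;   /* like preempt_lazy_enable()  */
        printf("unpinned: preempt=%d\n", should_preempt()); /* 1 */
        return 0;
}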
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 49b4bba08e30..15f31426df25 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -57,15 +57,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  include/linux/thread_info.h  |   12 +++++-
  include/linux/trace_events.h |    1 
  kernel/Kconfig.preempt       |    6 +++
- kernel/cpu.c                 |    2 +
- kernel/sched/core.c          |   83 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/core.c          |   82 +++++++++++++++++++++++++++++++++++++++++--
  kernel/sched/fair.c          |   16 ++++----
  kernel/sched/features.h      |    3 +
  kernel/sched/sched.h         |    9 ++++
  kernel/trace/trace.c         |   35 ++++++++++--------
  kernel/trace/trace.h         |    2 +
  kernel/trace/trace_output.c  |   14 ++++++-
- 13 files changed, 227 insertions(+), 29 deletions(-)
+ 12 files changed, 224 insertions(+), 29 deletions(-)
 
 --- a/include/linux/preempt.h
 +++ b/include/linux/preempt.h
@@ -114,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +	preempt_check_resched(); \
 +} while (0)
 +
- #else /* !CONFIG_PREEMPT */
+ #else /* !CONFIG_PREEMPTION */
  #define preempt_enable() \
  do { \
 @@ -254,6 +281,12 @@ do { \
@@ -141,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -1744,6 +1744,44 @@ static inline int test_tsk_need_resched(
+@@ -1791,6 +1791,44 @@ static inline int test_tsk_need_resched(
  	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
  }
@@ -219,40 +218,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #define TRACE_EVENT_TYPE_MAX \
 --- a/kernel/Kconfig.preempt
 +++ b/kernel/Kconfig.preempt
-@@ -7,6 +7,12 @@ config PREEMPT_RT_BASE
- 	bool
- 	select PREEMPT
+@@ -1,5 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
 
 +config HAVE_PREEMPT_LAZY
 +	bool
 +
 +config PREEMPT_LAZY
-+	def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
++	def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
 +
  choice
  	prompt "Preemption Model"
  	default PREEMPT_NONE
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -305,6 +305,7 @@ void pin_current_cpu(void)
- 		return;
- 	}
- 	cpu = smp_processor_id();
-+	preempt_lazy_enable();
- 	preempt_enable();
- 
- 	sleeping_lock_inc();
-@@ -312,6 +313,7 @@ void pin_current_cpu(void)
- 	sleeping_lock_dec();
- 
- 	preempt_disable();
-+	preempt_lazy_disable();
- 	if (cpu != smp_processor_id()) {
- 		__read_rt_unlock(cpuhp_pin);
- 		goto again;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -544,6 +544,48 @@ void resched_curr(struct rq *rq)
+@@ -555,6 +555,48 @@ void resched_curr(struct rq *rq)
  	trace_sched_wake_idle_without_ipi(cpu);
  }
 
  void resched_cpu(int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
-@@ -2449,6 +2491,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3002,6 +3044,9 @@ int sched_fork(unsigned long clone_flags
  	p->on_cpu = 0;
  #endif
  	init_task_preempt_count(p);
 +#ifdef CONFIG_HAVE_PREEMPT_LAZY
 +	task_thread_info(p)->preempt_lazy_count = 0;
 +#endif
  #ifdef CONFIG_SMP
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3548,6 +3593,7 @@ static void __sched notrace __schedule(b
+@@ -4139,6 +4184,7 @@ static void __sched notrace __schedule(b
  	next = pick_next_task(rq, prev, &rf);
  	clear_tsk_need_resched(prev);
 +	clear_tsk_need_resched_lazy(prev);
  	clear_preempt_need_resched();
 
  	if (likely(prev != next)) {
-@@ -3731,6 +3777,30 @@ static void __sched notrace preempt_sche
+@@ -4326,6 +4372,30 @@ static void __sched notrace preempt_sche
  	} while (need_resched());
  }
 
 +
 +#endif
 +
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPTION
  /*
- * this is the entry point to schedule() from in-kernel preemption
-@@ -3745,7 +3815,8 @@ asmlinkage __visible void __sched notrac
+ * This is the entry point to schedule() from in-kernel preemption
+@@ -4339,7 +4409,8 @@ asmlinkage __visible void __sched notrac
  	 */
  	if (likely(!preemptible()))
  		return;
 -
 +	if (!preemptible_lazy())
 +		return;
 
  	preempt_schedule_common();
  }
  NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3772,6 +3843,9 @@ asmlinkage __visible void __sched notrac
+@@ -4366,6 +4437,9 @@ asmlinkage __visible void __sched notrac
  	if (likely(!preemptible()))
  		return;
 
 +	if (!preemptible_lazy())
 +		return;
 +
  	do {
  		/*
  		 * Because the function tracer can trace preempt_count_sub()
-@@ -5537,7 +5611,9 @@ void init_idle(struct task_struct *idle,
+@@ -6156,7 +6230,9 @@ void init_idle(struct task_struct *idle,
 
  	/* Set the preempt count _outside_ the spinlocks! */
  	init_idle_preempt_count(idle, cpu);
 +	task_thread_info(idle)->preempt_lazy_count = 0;
 +
  	/*
  	 * The idle tasks have their own, simple scheduling class:
  	 */
-@@ -7287,6 +7363,7 @@ void migrate_disable(void)
+@@ -8093,6 +8169,7 @@ void migrate_disable(void)
+ 
+ 	if (++current->migrate_disable == 1) {
+ 		this_rq()->nr_pinned++;
++		preempt_lazy_disable();
+ #ifdef CONFIG_SCHED_DEBUG
+ 	WARN_ON_ONCE(current->pinned_on_cpu >= 0);
+ 	current->pinned_on_cpu = smp_processor_id();
+@@ -8169,6 +8246,7 @@ void migrate_enable(void)
  	}
- 	preempt_disable();
-+	preempt_lazy_disable();
- 	pin_current_cpu();
- 
- 	migrate_disable_update_cpus_allowed(p);
-@@ -7354,6 +7431,7 @@ void migrate_enable(void)
- 		arg.dest_cpu = dest_cpu;
- 
- 		unpin_current_cpu();
-+		preempt_lazy_enable();
- 		preempt_enable();
- 
- 		sleeping_lock_inc();
-@@ -7363,6 +7441,7 @@ void migrate_enable(void)
- 		}
- 	}
- 	unpin_current_cpu();
+ out:
++	preempt_lazy_enable();
  	preempt_enable();
  }
  EXPORT_SYMBOL(migrate_enable);
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -4104,7 +4104,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4122,7 +4122,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  	ideal_runtime = sched_slice(cfs_rq, curr);
  	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
  	if (delta_exec > ideal_runtime) {
 -		resched_curr(rq_of(cfs_rq));
 +		resched_curr_lazy(rq_of(cfs_rq));
  		/*
  		 * The current task ran long enough, ensure it doesn't get
  		 * re-elected due to buddy favours.
-@@ -4128,7 +4128,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4146,7 +4146,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
  		return;
 
  	if (delta > ideal_runtime)
 -		resched_curr(rq_of(cfs_rq));
 +		resched_curr_lazy(rq_of(cfs_rq));
  }
 
  static void
-@@ -4270,7 +4270,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4289,7 +4289,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
  	 * validating it and just reschedule.
  	 */
  	if (queued) {
 -		resched_curr(rq_of(cfs_rq));
 +		resched_curr_lazy(rq_of(cfs_rq));
  		return;
  	}
  	/*
-@@ -4456,7 +4456,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4414,7 +4414,7 @@ static void __account_cfs_rq_runtime(str
  	 * hierarchy can be throttled
  	 */
  	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
 -		resched_curr(rq_of(cfs_rq));
 +		resched_curr_lazy(rq_of(cfs_rq));
  }
 
  static __always_inline
-@@ -5169,7 +5169,7 @@ static void hrtick_start_fair(struct rq
+@@ -5127,7 +5127,7 @@ static void hrtick_start_fair(struct rq
 
  		if (delta < 0) {
  			if (rq->curr == p)
 -				resched_curr(rq);
 +				resched_curr_lazy(rq);
  			return;
  		}
  		hrtick_start(rq, delta);
-@@ -7009,7 +7009,7 @@ static void check_preempt_wakeup(struct
+@@ -6729,7 +6729,7 @@ static void check_preempt_wakeup(struct
  		return;
 
  preempt:
 -	resched_curr(rq);
 +	resched_curr_lazy(rq);
  	/*
  	 * Only set the backward buddy when the current task is still
  	 * on the rq. This can happen when a wakeup gets interleaved
-@@ -10282,7 +10282,7 @@ static void task_fork_fair(struct task_s
+@@ -9984,7 +9984,7 @@ static void task_fork_fair(struct task_s
  		 * 'current' within the tree based on its new key value.
  		 */
  		swap(curr->vruntime, se->vruntime);
 -		resched_curr(rq);
 +		resched_curr_lazy(rq);
  	}
 
  	se->vruntime -= cfs_rq->min_vruntime;
-@@ -10306,7 +10306,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10008,7 +10008,7 @@ prio_changed_fair(struct rq *rq, struct
  	 */
  	if (rq->curr == p) {
  		if (p->prio > oldprio)
 -			resched_curr(rq);
 +			resched_curr_lazy(rq);
  	}
 --- a/kernel/sched/features.h
 +++ b/kernel/sched/features.h
-@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
- #ifdef CONFIG_PREEMPT_RT_FULL
+@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
+ #ifdef CONFIG_PREEMPT_RT
  SCHED_FEAT(TTWU_QUEUE, false)
 +# ifdef CONFIG_PREEMPT_LAZY
 +SCHED_FEAT(PREEMPT_LAZY, true)
 +# endif
  #else
 
  /*
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -1786,6 +1786,15 @@ extern void reweight_task(struct task_st
+@@ -1876,6 +1876,15 @@ extern void reweight_task(struct task_st
  extern void resched_curr(struct rq *rq);
  extern void resched_cpu(int cpu);
 
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -2318,6 +2318,7 @@ tracing_generic_entry_update(struct trac
+@@ -2335,6 +2335,7 @@ tracing_generic_entry_update(struct trac
  	struct task_struct *tsk = current;
 
  	entry->preempt_count = pc & 0xff;
 +	entry->preempt_lazy_count = preempt_lazy_count();
  	entry->pid = (tsk) ? tsk->pid : 0;
+ 	entry->type = type;
  	entry->flags =
  #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2328,7 +2329,8 @@ tracing_generic_entry_update(struct trac
+@@ -2346,7 +2347,8 @@ tracing_generic_entry_update(struct trac
  		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
  		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
  		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
 -		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 +		(tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
 +		(need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
  		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
  	entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3555,15 +3557,17 @@ unsigned long trace_total_entries(struct
+@@ -3575,15 +3577,17 @@ unsigned long trace_total_entries(struct
  static void print_lat_help_header(struct seq_file *m)
  {
-		    "#                ||||| /     delay            \n"
-		    "#  cmd     pid   |||||| time  |   caller      \n"
-		    "#     \\   /      ||||||  \\    |   /         \n");
+	seq_puts(m, "#                  _--------=> CPU#              \n"
+		    "#                 / _-------=> irqs-off          \n"
+		    "#                | / _------=> need-resched      \n"
+		    "#                || / _-----=> need-resched_lazy \n"
+		    "#                ||| / _----=> hardirq/softirq   \n"
+		    "#                |||| / _---=> preempt-depth     \n"
+		    "#                ||||| / _--=> preempt-lazy-depth\n"
+		    "#                |||||| / _-=> migrate-disable   \n"
+		    "#                ||||||| /     delay             \n"
+		    "#  cmd     pid   |||||||| time  |   caller       \n"
+		    "#     \\   /      ||||||||  \\    |   /            \n");
  }
 
  static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3599,11 +3603,12 @@ static void print_func_help_header_irq(s
+@@ -3619,11 +3623,12 @@ static void print_func_help_header_irq(s
  	seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
  	seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);