diff options
author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2021-01-19 18:52:44 +0100 |
---|---|---|
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2021-01-19 18:52:44 +0100 |
commit | f28ba1e31a424b09bb34028cacfaaf6bf621d616 (patch) | |
tree | b97d6eb8d8757b0f70c3eac1e025b62a1768b524 | |
parent | d34f4e619d0431891deda0ad924a48b1dcfe9a16 (diff) | |
download | linux-rt-f28ba1e31a424b09bb34028cacfaaf6bf621d616.tar.gz |
[ANNOUNCE] v5.10.8-rt24v5.10.8-rt24-patches
Dear RT folks!
I'm pleased to announce the v5.10.8-rt24 patch set.
Changes since v5.10.8-rt23:
- Added RT's version of __down_read_interruptible(), which has been used by
the stable tree since v5.10.6.
- Updated the "tracing: Merge irqflags + preempt counter." patch to
the latest version posted for upstream.
[ The issues listed below were accidentally lost in the last announcement. ]
Known issues
- kdb/kgdb can easily deadlock.
- kmsg dumpers expecting not to be called in parallel can clobber
their temp buffer.
- netconsole triggers WARN.
The delta patch against v5.10.8-rt23 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/incr/patch-5.10.8-rt23-rt24.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.10.8-rt24
The RT patch against v5.10.8 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patch-5.10.8-rt24.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.8-rt24.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
20 files changed, 334 insertions, 289 deletions
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index b8336c56d4a4..9a900e3f4313 100644 --- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1329,7 +1329,7 @@ static inline void prefetch_buddy(struct +@@ -1331,7 +1331,7 @@ static inline void prefetch_buddy(struct } /* @@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * -@@ -1340,14 +1340,40 @@ static inline void prefetch_buddy(struct +@@ -1342,14 +1342,40 @@ static inline void prefetch_buddy(struct * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, @@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Ensure proper count is passed which otherwise would stuck in the -@@ -1384,7 +1410,7 @@ static void free_pcppages_bulk(struct zo +@@ -1386,7 +1412,7 @@ static void free_pcppages_bulk(struct zo if (bulkfree_pcp_prepare(page)) continue; @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * We are going to put the page back to the global -@@ -1399,26 +1425,6 @@ static void free_pcppages_bulk(struct zo +@@ -1401,26 +1427,6 @@ static void free_pcppages_bulk(struct zo prefetch_buddy(page); } while (--count && --batch_free && !list_empty(list)); } @@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void free_one_page(struct zone *zone, -@@ -2935,13 +2941,18 @@ void drain_zone_pages(struct zone *zone, +@@ -2937,13 +2943,18 @@ void drain_zone_pages(struct zone *zone, { unsigned long flags; int to_drain, 
batch; @@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } #endif -@@ -2957,14 +2968,21 @@ static void drain_pages_zone(unsigned in +@@ -2959,14 +2970,21 @@ static void drain_pages_zone(unsigned in unsigned long flags; struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -3193,7 +3211,10 @@ static void free_unref_page_commit(struc +@@ -3195,7 +3213,10 @@ static void free_unref_page_commit(struc pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); diff --git a/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch b/patches/0001-tracing-Merge-irqflags-preempt-counter.patch index bbdac118f98f..cfb3017ce1d3 100644 --- a/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch +++ b/patches/0001-tracing-Merge-irqflags-preempt-counter.patch @@ -1,66 +1,76 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Wed, 9 Dec 2020 16:19:57 +0100 -Subject: [PATCH] tracing: Merge irqflags + preemt counter, add RT bits - -PREEMPT_RT never reported "serving softirq". I took a look to see if it -could be changed. The tracing infrastructure examinates the preemtion -counter for that. PREEMPT_RT does not change the preemption counter -while disabling the bottom half or serving the softirqs in order to -remain preemptible. The in_serving_softirq() macro and the SOFTIRQ_OFFSET -define are still working but not on the preempt-counter. -I started to look how to integrate the RT bits regarding softirq. +Date: Tue, 12 Jan 2021 15:58:53 +0100 +Subject: [PATCH 1/3] tracing: Merge irqflags + preempt counter. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit The state of the interrupts (irqflags) and the preemption counter are -passed down to tracing_generic_entry_update(). However only one bit of -irqflags is actually required: The on/off state. 
+both passed down to tracing_generic_entry_update(). Only one bit of +irqflags is actually required: The on/off state. The complete 32bit +of the preemption counter isn't needed. Just whether of the upper bits +(softirq, hardirq and NMI) are set and the preemption depth is needed. + The irqflags and the preemption counter could be evaluated early and the information stored in an integer `trace_ctx'. tracing_generic_entry_update() would use the upper bits as the -TRACE_FLAG_* and the lower 16bit as the preemption counter (considering -that 1 must be substracted from the counter in some cases). - -Whith this change the preemption counter is read in one place and the -relevant RT bits for softirq can be set there. +TRACE_FLAG_* and the lower 8bit as the disabled-preemption depth +(considering that one must be substracted from the counter in one +special cases). The actual preemption value is not used except for the tracing record. -The `irqflags' is also not used except for the _irqsave() locking in a -few spots. -As part of the patch I added __ to trace_event_buffer_commit() while -evaluating trace_event_buffer() for the struct trace_event_buffer usage -regarding the `pc' and `flags' members. It appears that those two can -also be merged into the `trace_ctx' integer. -With this change the callchain passes one argument less and evaluates -the flags early. A build with all tracers enabled on x86-64 with and -without the patch: +The `irqflags' variable is mostly used only for the tracing record. An +exception here is for instance wakeup_tracer_call() or +probe_wakeup_sched_switch() which explicilty disable interrupts and use +that `irqflags' to save (and restore) the IRQ state and to record the +state. + +Struct trace_event_buffer has also the `pc' and flags' members which can +be replaced with `trace_ctx' since their actual value is not used +outside of trace recording. + +This will reduce tracing_generic_entry_update() to simply assign values +to struct trace_entry. 
The evaluation of the TRACE_FLAG_* bits is moved +to _tracing_gen_ctx_flags() which replaces preempt_count() and +local_save_flags() invocations. + +As an example, ftrace_syscall_enter() may invoke: +- trace_buffer_lock_reserve() -> … -> tracing_generic_entry_update() +- event_trigger_unlock_commit() + -> ftrace_trace_stack() -> … -> tracing_generic_entry_update() + -> ftrace_trace_userstack() -> … -> tracing_generic_entry_update() + +In this case the TRACE_FLAG_* bits were evaluated three times. By using +the `trace_ctx' they are evaluated once and assigned three times. + +A build with all tracers enabled on x86-64 with and without the patch: text data bss dec hex filename -24301717 22148594 13996284 60446595 39a5783 vmlinux.old -24301248 22148850 13996284 60446382 39a56ae vmlinux.new +21970669 17084168 7639260 46694097 2c87ed1 vmlinux.old +21970293 17084168 7639260 46693721 2c87d59 vmlinux.new -data increased by 256 bytes, text shrank by 469 bytes. +text shrank by 379 bytes, data remained constant. 
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- - include/linux/trace_events.h | 27 +++- - include/trace/trace_events.h | 2 + include/linux/trace_events.h | 25 +++- kernel/trace/blktrace.c | 17 +- - kernel/trace/trace.c | 217 +++++++++++++++++++---------------- + kernel/trace/trace.c | 208 ++++++++++++++++++----------------- kernel/trace/trace.h | 38 ++---- - kernel/trace/trace_branch.c | 6 + kernel/trace/trace_branch.c | 6 - kernel/trace/trace_event_perf.c | 5 - kernel/trace/trace_events.c | 15 -- - kernel/trace/trace_events_inject.c | 8 - - kernel/trace/trace_events_synth.c | 4 - kernel/trace/trace_functions.c | 26 +--- + kernel/trace/trace_events.c | 18 +-- + kernel/trace/trace_events_inject.c | 6 - + kernel/trace/trace_functions.c | 28 ++-- kernel/trace/trace_functions_graph.c | 32 ++--- kernel/trace/trace_hwlat.c | 7 - - kernel/trace/trace_irqsoff.c | 86 +++++-------- - kernel/trace/trace_kprobe.c | 14 -- + kernel/trace/trace_irqsoff.c | 86 ++++++-------- + kernel/trace/trace_kprobe.c | 10 - kernel/trace/trace_mmiotrace.c | 14 +- - kernel/trace/trace_sched_wakeup.c | 61 +++++---- + kernel/trace/trace_sched_wakeup.c | 71 +++++------ kernel/trace/trace_syscalls.c | 20 +-- kernel/trace/trace_uprobe.c | 4 - 19 files changed, 303 insertions(+), 300 deletions(-) + 17 files changed, 287 insertions(+), 308 deletions(-) --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -109,26 +119,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct pt_regs *regs; }; -@@ -241,7 +252,7 @@ void *trace_event_buffer_reserve(struct - struct trace_event_file *trace_file, - unsigned long len); - --void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); -+void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer); - - enum { - TRACE_EVENT_FL_FILTERED_BIT, ---- a/include/trace/trace_events.h -+++ b/include/trace/trace_events.h -@@ -694,7 +694,7 @@ trace_event_raw_event_##call(void *__dat - \ - { assign; } 
\ - \ -- trace_event_buffer_commit(&fbuffer); \ -+ trace_event_buffer_commit__(&fbuffer); \ - } - /* - * The ftrace_test_probe is compiled out, it is only here as a build time check --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -72,17 +72,17 @@ static void trace_note(struct blk_trace @@ -343,39 +333,47 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ret = 1; out: -@@ -2584,36 +2578,78 @@ enum print_line_t trace_handle_return(st +@@ -2584,36 +2578,69 @@ enum print_line_t trace_handle_return(st } EXPORT_SYMBOL_GPL(trace_handle_return); -void -tracing_generic_entry_update(struct trace_entry *entry, unsigned short type, - unsigned long flags, int pc) -+static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags) -+{ ++unsigned int _tracing_gen_ctx_flags(unsigned long irqflags) + { +- struct task_struct *tsk = current; + unsigned int trace_flags = 0; + unsigned int pc; + + pc = preempt_count(); -+ -+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT + +- entry->preempt_count = pc & 0xff; +- entry->pid = (tsk) ? tsk->pid : 0; +- entry->type = type; +- entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +- (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | + if (irqs_disabled_flags(irqflags)) + trace_flags |= TRACE_FLAG_IRQS_OFF; -+#else -+ trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT; -+#endif + #else +- TRACE_FLAG_IRQS_NOSUPPORT | ++ trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT; + #endif +- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | +- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | +- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | +- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | +- (test_preempt_need_resched() ? 
TRACE_FLAG_PREEMPT_RESCHED : 0); + + if (pc & NMI_MASK) + trace_flags |= TRACE_FLAG_NMI; + if (pc & HARDIRQ_MASK) + trace_flags |= TRACE_FLAG_HARDIRQ; + -+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) { -+ if (in_serving_softirq()) -+ trace_flags |= TRACE_FLAG_SOFTIRQ; -+ } else { -+ if (pc & SOFTIRQ_OFFSET) -+ trace_flags |= TRACE_FLAG_SOFTIRQ; -+ } ++ if (pc & SOFTIRQ_OFFSET) ++ trace_flags |= TRACE_FLAG_SOFTIRQ; ++ + if (tif_need_resched()) + trace_flags |= TRACE_FLAG_NEED_RESCHED; + if (test_preempt_need_resched()) @@ -383,32 +381,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + return (trace_flags << 16) | (pc & 0xff); +} + -+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags) - { -- struct task_struct *tsk = current; -+ return __tracing_gen_ctx_flags(irqflags); -+} -+ +unsigned int tracing_gen_ctx_flags(void) +{ + unsigned long irqflags; - -- entry->preempt_count = pc & 0xff; -- entry->pid = (tsk) ? tsk->pid : 0; -- entry->type = type; -- entry->flags = - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -- (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | ++ ++#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT + local_save_flags(irqflags); - #else -- TRACE_FLAG_IRQS_NOSUPPORT | ++#else + irqflags = 0; - #endif -- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | -- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | -- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | -- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | -- (test_preempt_need_resched() ? 
TRACE_FLAG_PREEMPT_RESCHED : 0); ++#endif + return _tracing_gen_ctx_flags(irqflags); +} + @@ -440,7 +421,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); -@@ -2733,7 +2769,7 @@ struct ring_buffer_event * +@@ -2733,7 +2760,7 @@ struct ring_buffer_event * trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, struct trace_event_file *trace_file, int type, unsigned long len, @@ -449,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct ring_buffer_event *entry; int val; -@@ -2746,15 +2782,15 @@ trace_event_buffer_lock_reserve(struct t +@@ -2746,15 +2773,15 @@ trace_event_buffer_lock_reserve(struct t /* Try to use the per cpu buffer first */ val = this_cpu_inc_return(trace_buffered_event_cnt); if (val == 1) { @@ -468,7 +449,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * If tracing is off, but we have triggers enabled * we still need to look at the event data. 
Use the temp_buffer -@@ -2763,8 +2799,8 @@ trace_event_buffer_lock_reserve(struct t +@@ -2763,8 +2790,8 @@ trace_event_buffer_lock_reserve(struct t */ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { *current_rb = temp_buffer; @@ -479,28 +460,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } return entry; } -@@ -2841,7 +2877,7 @@ int tracepoint_printk_sysctl(struct ctl_ - return ret; - } - --void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) -+void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer) - { - if (static_key_false(&tracepoint_printk_key.key)) - output_printk(fbuffer); -@@ -2850,9 +2886,9 @@ void trace_event_buffer_commit(struct tr +@@ -2850,7 +2877,7 @@ void trace_event_buffer_commit(struct tr ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer, fbuffer->event, fbuffer->entry, - fbuffer->flags, fbuffer->pc, fbuffer->regs); + fbuffer->trace_ctx, fbuffer->regs); } --EXPORT_SYMBOL_GPL(trace_event_buffer_commit); -+EXPORT_SYMBOL_GPL(trace_event_buffer_commit__); + EXPORT_SYMBOL_GPL(trace_event_buffer_commit); - /* - * Skip 3: -@@ -2866,7 +2902,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_com +@@ -2866,7 +2893,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_com void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct trace_buffer *buffer, struct ring_buffer_event *event, @@ -509,7 +478,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); -@@ -2877,8 +2913,8 @@ void trace_buffer_unlock_commit_regs(str +@@ -2877,8 +2904,8 @@ void trace_buffer_unlock_commit_regs(str * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. 
*/ @@ -520,7 +489,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -2892,9 +2928,8 @@ trace_buffer_unlock_commit_nostack(struc +@@ -2892,9 +2919,8 @@ trace_buffer_unlock_commit_nostack(struc } void @@ -532,7 +501,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct trace_event_call *call = &event_function; struct trace_buffer *buffer = tr->array_buffer.buffer; -@@ -2902,7 +2937,7 @@ trace_function(struct trace_array *tr, +@@ -2902,7 +2928,7 @@ trace_function(struct trace_array *tr, struct ftrace_entry *entry; event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), @@ -541,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) return; entry = ring_buffer_event_data(event); -@@ -2936,8 +2971,8 @@ static DEFINE_PER_CPU(struct ftrace_stac +@@ -2936,8 +2962,8 @@ static DEFINE_PER_CPU(struct ftrace_stac static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct trace_buffer *buffer, @@ -552,7 +521,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; -@@ -2984,7 +3019,7 @@ static void __ftrace_trace_stack(struct +@@ -2984,7 +3010,7 @@ static void __ftrace_trace_stack(struct size = nr_entries * sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, @@ -561,7 +530,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) goto out; entry = ring_buffer_event_data(event); -@@ -3005,22 +3040,22 @@ static void __ftrace_trace_stack(struct +@@ -3005,22 +3031,22 @@ static void __ftrace_trace_stack(struct static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, @@ -590,7 +559,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return; } -@@ -3034,7 +3069,7 @@ void __trace_stack(struct trace_array *t +@@ -3034,7 +3060,7 @@ void __trace_stack(struct 
trace_array *t return; rcu_irq_enter_irqson(); @@ -599,7 +568,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> rcu_irq_exit_irqson(); } -@@ -3056,7 +3091,7 @@ void trace_dump_stack(int skip) +@@ -3044,19 +3070,15 @@ void __trace_stack(struct trace_array *t + */ + void trace_dump_stack(int skip) + { +- unsigned long flags; +- + if (tracing_disabled || tracing_selftest_running) + return; + +- local_save_flags(flags); +- + #ifndef CONFIG_UNWINDER_ORC + /* Skip 1 to skip this function. */ skip++; #endif __ftrace_trace_stack(global_trace.array_buffer.buffer, @@ -608,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } EXPORT_SYMBOL_GPL(trace_dump_stack); -@@ -3065,7 +3100,7 @@ static DEFINE_PER_CPU(int, user_stack_co +@@ -3065,7 +3087,7 @@ static DEFINE_PER_CPU(int, user_stack_co static void ftrace_trace_userstack(struct trace_array *tr, @@ -617,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; -@@ -3092,7 +3127,7 @@ ftrace_trace_userstack(struct trace_arra +@@ -3092,7 +3114,7 @@ ftrace_trace_userstack(struct trace_arra __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, @@ -626,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); -@@ -3112,7 +3147,7 @@ ftrace_trace_userstack(struct trace_arra +@@ -3112,7 +3134,7 @@ ftrace_trace_userstack(struct trace_arra #else /* CONFIG_USER_STACKTRACE_SUPPORT */ static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, @@ -635,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { } #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ -@@ -3242,9 +3277,9 @@ int trace_vbprintk(unsigned long ip, con +@@ -3242,9 +3264,9 @@ int trace_vbprintk(unsigned long ip, con struct trace_buffer *buffer; 
struct trace_array *tr = &global_trace; struct bprint_entry *entry; @@ -647,7 +628,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; -@@ -3252,7 +3287,7 @@ int trace_vbprintk(unsigned long ip, con +@@ -3252,7 +3274,7 @@ int trace_vbprintk(unsigned long ip, con /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); @@ -656,7 +637,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_disable_notrace(); tbuffer = get_trace_buf(); -@@ -3266,12 +3301,11 @@ int trace_vbprintk(unsigned long ip, con +@@ -3266,12 +3288,11 @@ int trace_vbprintk(unsigned long ip, con if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out_put; @@ -670,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) goto out; entry = ring_buffer_event_data(event); -@@ -3281,7 +3315,7 @@ int trace_vbprintk(unsigned long ip, con +@@ -3281,7 +3302,7 @@ int trace_vbprintk(unsigned long ip, con memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); @@ -679,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } out: -@@ -3304,9 +3338,9 @@ static int +@@ -3304,9 +3325,9 @@ static int { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; @@ -691,7 +672,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> char *tbuffer; if (tracing_disabled || tracing_selftest_running) -@@ -3315,7 +3349,7 @@ static int +@@ -3315,7 +3336,7 @@ static int /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); @@ -700,7 +681,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_disable_notrace(); -@@ -3327,11 +3361,10 @@ static int +@@ -3327,11 +3348,10 @@ static int len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); @@ -713,7 +694,7 @@ 
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) goto out; entry = ring_buffer_event_data(event); -@@ -3340,7 +3373,7 @@ static int +@@ -3340,7 +3360,7 @@ static int memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); @@ -722,7 +703,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } out: -@@ -6653,7 +6686,6 @@ tracing_mark_write(struct file *filp, co +@@ -6653,7 +6673,6 @@ tracing_mark_write(struct file *filp, co enum event_trigger_type tt = ETT_NONE; struct trace_buffer *buffer; struct print_entry *entry; @@ -730,7 +711,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ssize_t written; int size; int len; -@@ -6673,7 +6705,6 @@ tracing_mark_write(struct file *filp, co +@@ -6673,7 +6692,6 @@ tracing_mark_write(struct file *filp, co BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); @@ -738,7 +719,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ /* If less than "<faulted>", then make sure we can still add that */ -@@ -6682,7 +6713,7 @@ tracing_mark_write(struct file *filp, co +@@ -6682,7 +6700,7 @@ tracing_mark_write(struct file *filp, co buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, @@ -747,7 +728,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (unlikely(!event)) /* Ring buffer disabled, return as if not open for write */ return -EBADF; -@@ -6734,7 +6765,6 @@ tracing_mark_raw_write(struct file *filp +@@ -6734,7 +6752,6 @@ tracing_mark_raw_write(struct file *filp struct ring_buffer_event *event; struct trace_buffer *buffer; struct raw_data_entry *entry; @@ -755,7 +736,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ssize_t written; int size; int len; -@@ -6756,14 +6786,13 @@ tracing_mark_raw_write(struct file *filp +@@ -6756,14 +6773,13 @@ 
tracing_mark_raw_write(struct file *filp BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); @@ -903,7 +884,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct trace_branch *entry; unsigned long flags; - int pc; -+ unsigned int pc; ++ unsigned int trace_ctx; const char *p; if (current->trace_recursion & TRACE_BRANCH_BIT) @@ -962,10 +943,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!fbuffer->event) return NULL; -@@ -3679,11 +3676,11 @@ function_test_events_call(unsigned long +@@ -3678,12 +3675,11 @@ function_test_events_call(unsigned long + struct trace_buffer *buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; - unsigned long flags; +- unsigned long flags; + unsigned int trace_ctx; long disabled; int cpu; @@ -976,8 +958,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_disable_notrace(); cpu = raw_smp_processor_id(); disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); -@@ -3695,7 +3692,7 @@ function_test_events_call(unsigned long +@@ -3691,11 +3687,9 @@ function_test_events_call(unsigned long + if (disabled != 1) + goto out; +- local_save_flags(flags); +- event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, TRACE_FN, sizeof(*entry), - flags, pc); @@ -985,7 +971,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) goto out; entry = ring_buffer_event_data(event); -@@ -3703,7 +3700,7 @@ function_test_events_call(unsigned long +@@ -3703,7 +3697,7 @@ function_test_events_call(unsigned long entry->parent_ip = parent_ip; event_trigger_unlock_commit(&event_trace_file, buffer, event, @@ -996,15 +982,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_enable_notrace(); --- a/kernel/trace/trace_events_inject.c +++ b/kernel/trace/trace_events_inject.c -@@ -25,7 +25,7 @@ trace_inject_entry(struct trace_event_fi - if (entry) { - memcpy(entry, rec, len); - written = len; -- 
trace_event_buffer_commit(&fbuffer); -+ trace_event_buffer_commit__(&fbuffer); - } - rcu_read_unlock_sched(); - @@ -192,7 +192,6 @@ static void *trace_alloc_entry(struct tr static int parse_entry(char *str, struct trace_event_call *call, void **pentry) { @@ -1025,26 +1002,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> while ((len = parse_field(str, call, &field, &val)) > 0) { if (is_function_field(field)) ---- a/kernel/trace/trace_events_synth.c -+++ b/kernel/trace/trace_events_synth.c -@@ -504,7 +504,7 @@ static notrace void trace_event_raw_even - } - } - -- trace_event_buffer_commit(&fbuffer); -+ trace_event_buffer_commit__(&fbuffer); - out: - ring_buffer_nest_end(buffer); - } -@@ -1494,7 +1494,7 @@ static inline int - static inline void - __synth_event_trace_end(struct synth_event_trace_state *trace_state) - { -- trace_event_buffer_commit(&trace_state->fbuffer); -+ trace_event_buffer_commit__(&trace_state->fbuffer); - - ring_buffer_nest_end(trace_state->buffer); - } --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -133,15 +133,14 @@ function_trace_call(unsigned long ip, un @@ -1084,7 +1041,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> long disabled; int cpu; - int pc; -+ int trace_ctx; ++ unsigned int trace_ctx; if (unlikely(!tr->function_enabled)) return; @@ -1095,20 +1052,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> - pc = preempt_count(); - trace_function(tr, ip, parent_ip, flags, pc); - __trace_stack(tr, flags, STACK_SKIP, pc); -+ trace_ctx = tracing_gen_ctx_flags(); ++ trace_ctx = _tracing_gen_ctx_flags(flags); + trace_function(tr, ip, parent_ip, trace_ctx); + __trace_stack(tr, trace_ctx, STACK_SKIP); } atomic_dec(&data->disabled); -@@ -408,12 +406,12 @@ ftrace_traceoff(unsigned long ip, unsign +@@ -407,13 +405,11 @@ ftrace_traceoff(unsigned long ip, unsign + static __always_inline void trace_stack(struct trace_array *tr) { - unsigned long flags; +- unsigned 
long flags; - int pc; + unsigned int trace_ctx; - local_save_flags(flags); +- local_save_flags(flags); - pc = preempt_count(); + trace_ctx = tracing_gen_ctx_flags(); @@ -1500,15 +1458,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!fbuffer.event) return; -@@ -1403,7 +1402,7 @@ static nokprobe_inline void - entry->ip = (unsigned long)tk->rp.kp.addr; - store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); - -- trace_event_buffer_commit(&fbuffer); -+ trace_event_buffer_commit__(&fbuffer); - } - - static void @@ -1432,8 +1431,7 @@ static nokprobe_inline void if (trace_trigger_soft_disabled(trace_file)) return; @@ -1528,15 +1477,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!fbuffer.event) return; -@@ -1451,7 +1449,7 @@ static nokprobe_inline void - entry->ret_ip = (unsigned long)ri->ret_addr; - store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); - -- trace_event_buffer_commit(&fbuffer); -+ trace_event_buffer_commit__(&fbuffer); - } - - static void --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -300,10 +300,11 @@ static void __trace_mmiotrace_rw(struct @@ -1605,17 +1545,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_disable_notrace(); cpu = raw_smp_processor_id(); -@@ -117,7 +117,8 @@ static int wakeup_graph_entry(struct ftr +@@ -116,8 +116,8 @@ static int wakeup_graph_entry(struct ftr + { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; - unsigned long flags; +- unsigned long flags; - int pc, ret = 0; + unsigned int trace_ctx; + int ret = 0; if (ftrace_graph_ignore_func(trace)) return 0; -@@ -131,11 +132,11 @@ static int wakeup_graph_entry(struct ftr +@@ -131,11 +131,10 @@ static int wakeup_graph_entry(struct ftr if (ftrace_graph_notrace_addr(trace->func)) return 1; @@ -1623,16 +1564,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + if (!func_prolog_preempt_disable(tr, &data, 
&trace_ctx)) return 0; - local_save_flags(flags); +- local_save_flags(flags); - ret = __trace_graph_entry(tr, trace, flags, pc); + ret = __trace_graph_entry(tr, trace, trace_ctx); atomic_dec(&data->disabled); preempt_enable_notrace(); -@@ -147,15 +148,15 @@ static void wakeup_graph_return(struct f +@@ -146,16 +145,14 @@ static void wakeup_graph_return(struct f + { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; - unsigned long flags; +- unsigned long flags; - int pc; + unsigned int trace_ctx; @@ -1642,13 +1584,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + if (!func_prolog_preempt_disable(tr, &data, &trace_ctx)) return; - local_save_flags(flags); +- local_save_flags(flags); - __trace_graph_return(tr, trace, flags, pc); + __trace_graph_return(tr, trace, trace_ctx); atomic_dec(&data->disabled); preempt_enable_notrace(); -@@ -217,13 +218,13 @@ wakeup_tracer_call(unsigned long ip, uns +@@ -217,13 +214,13 @@ wakeup_tracer_call(unsigned long ip, uns struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; unsigned long flags; @@ -1665,7 +1607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_irq_restore(flags); atomic_dec(&data->disabled); -@@ -303,12 +304,12 @@ static void wakeup_print_header(struct s +@@ -303,12 +300,12 @@ static void wakeup_print_header(struct s static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, @@ -1681,7 +1623,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) -@@ -375,7 +376,7 @@ static void +@@ -375,7 +372,7 @@ static void tracing_sched_switch_trace(struct trace_array *tr, struct task_struct *prev, struct task_struct *next, @@ -1690,7 +1632,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct trace_event_call *call = &event_context_switch; struct trace_buffer *buffer = tr->array_buffer.buffer; -@@ 
-383,7 +384,7 @@ tracing_sched_switch_trace(struct trace_ +@@ -383,7 +380,7 @@ tracing_sched_switch_trace(struct trace_ struct ctx_switch_entry *entry; event = trace_buffer_lock_reserve(buffer, TRACE_CTX, @@ -1699,7 +1641,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) return; entry = ring_buffer_event_data(event); -@@ -396,14 +397,14 @@ tracing_sched_switch_trace(struct trace_ +@@ -396,14 +393,14 @@ tracing_sched_switch_trace(struct trace_ entry->next_cpu = task_cpu(next); if (!call_filter_check_discard(call, entry, buffer, event)) @@ -1716,7 +1658,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { struct trace_event_call *call = &event_wakeup; struct ring_buffer_event *event; -@@ -411,7 +412,7 @@ tracing_sched_wakeup_trace(struct trace_ +@@ -411,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_ struct trace_buffer *buffer = tr->array_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, @@ -1725,7 +1667,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!event) return; entry = ring_buffer_event_data(event); -@@ -424,7 +425,7 @@ tracing_sched_wakeup_trace(struct trace_ +@@ -424,7 +421,7 @@ tracing_sched_wakeup_trace(struct trace_ entry->next_cpu = task_cpu(wakee); if (!call_filter_check_discard(call, entry, buffer, event)) @@ -1734,7 +1676,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void notrace -@@ -436,7 +437,7 @@ probe_wakeup_sched_switch(void *ignore, +@@ -436,7 +433,7 @@ probe_wakeup_sched_switch(void *ignore, unsigned long flags; long disabled; int cpu; @@ -1743,16 +1685,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> tracing_record_cmdline(prev); -@@ -455,7 +456,7 @@ probe_wakeup_sched_switch(void *ignore, +@@ -455,8 +452,6 @@ probe_wakeup_sched_switch(void *ignore, if (next != wakeup_task) return; - pc = preempt_count(); -+ trace_ctx = tracing_gen_ctx_flags(); - +- /* disable local data, not wakeup_cpu 
data */ cpu = raw_smp_processor_id(); -@@ -473,9 +474,9 @@ probe_wakeup_sched_switch(void *ignore, + disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); +@@ -464,6 +459,8 @@ probe_wakeup_sched_switch(void *ignore, + goto out; + + local_irq_save(flags); ++ trace_ctx = _tracing_gen_ctx_flags(flags); ++ + arch_spin_lock(&wakeup_lock); + + /* We could race with grabbing wakeup_lock */ +@@ -473,9 +470,9 @@ probe_wakeup_sched_switch(void *ignore, /* The task we are waiting for is waking up */ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); @@ -1765,26 +1716,37 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); -@@ -529,7 +530,7 @@ probe_wakeup(void *ignore, struct task_s +@@ -527,9 +524,8 @@ probe_wakeup(void *ignore, struct task_s + { + struct trace_array_cpu *data; int cpu = smp_processor_id(); - unsigned long flags; +- unsigned long flags; long disabled; - int pc; + unsigned int trace_ctx; if (likely(!tracer_enabled)) return; -@@ -550,7 +551,7 @@ probe_wakeup(void *ignore, struct task_s +@@ -550,11 +546,12 @@ probe_wakeup(void *ignore, struct task_s (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) return; - pc = preempt_count(); -+ trace_ctx = tracing_gen_ctx_flags(); disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled); if (unlikely(disabled != 1)) goto out; -@@ -585,15 +586,15 @@ probe_wakeup(void *ignore, struct task_s ++ trace_ctx = tracing_gen_ctx_flags(); ++ + /* interrupts should be off from try_to_wake_up */ + arch_spin_lock(&wakeup_lock); + +@@ -581,19 +578,17 @@ probe_wakeup(void *ignore, struct task_s + + wakeup_task = get_task_struct(p); + +- local_save_flags(flags); +- data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu); data->preempt_timestamp = ftrace_now(cpu); - tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); diff --git 
a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch index 5de868a58f28..4ea0a4e233ba 100644 --- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -1339,8 +1339,8 @@ static inline void prefetch_buddy(struct +@@ -1341,8 +1341,8 @@ static inline void prefetch_buddy(struct * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ @@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { bool isolated_pageblocks; struct page *page, *tmp; -@@ -1355,12 +1355,27 @@ static void free_pcppages_bulk(struct zo +@@ -1357,12 +1357,27 @@ static void free_pcppages_bulk(struct zo */ list_for_each_entry_safe(page, tmp, head, lru) { int mt = get_pcppage_migratetype(page); @@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); trace_mm_page_pcpu_drain(page, 0, mt); } -@@ -2952,7 +2967,7 @@ void drain_zone_pages(struct zone *zone, +@@ -2954,7 +2969,7 @@ void drain_zone_pages(struct zone *zone, local_irq_restore(flags); if (to_drain > 0) @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } #endif -@@ -2982,7 +2997,7 @@ static void drain_pages_zone(unsigned in +@@ -2984,7 +2999,7 @@ static void drain_pages_zone(unsigned in local_irq_restore(flags); if (count) @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -3181,7 +3196,8 @@ static bool free_unref_page_prepare(stru +@@ -3183,7 +3198,8 @@ static bool free_unref_page_prepare(stru return true; } @@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { 
struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; -@@ -3211,10 +3227,8 @@ static void free_unref_page_commit(struc +@@ -3213,10 +3229,8 @@ static void free_unref_page_commit(struc pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); @@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } } -@@ -3225,13 +3239,17 @@ void free_unref_page(struct page *page) +@@ -3227,13 +3241,17 @@ void free_unref_page(struct page *page) { unsigned long flags; unsigned long pfn = page_to_pfn(page); @@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -3242,6 +3260,11 @@ void free_unref_page_list(struct list_he +@@ -3244,6 +3262,11 @@ void free_unref_page_list(struct list_he struct page *page, *next; unsigned long flags, pfn; int batch_count = 0; @@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Prepare pages for freeing */ list_for_each_entry_safe(page, next, list, lru) { -@@ -3254,10 +3277,12 @@ void free_unref_page_list(struct list_he +@@ -3256,10 +3279,12 @@ void free_unref_page_list(struct list_he local_irq_save(flags); list_for_each_entry_safe(page, next, list, lru) { unsigned long pfn = page_private(page); @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Guard against excessive IRQ disabled times when we get -@@ -3270,6 +3295,21 @@ void free_unref_page_list(struct list_he +@@ -3272,6 +3297,21 @@ void free_unref_page_list(struct list_he } } local_irq_restore(flags); diff --git a/patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch b/patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch new file mode 100644 index 000000000000..0cf23aad2ca6 --- /dev/null +++ b/patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch @@ -0,0 +1,37 @@ +From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +Date: Tue, 12 Jan 2021 20:37:54 +0100 
+Subject: [PATCH 2/3] tracing: Use in_serving_softirq() to deduct softirq + status. + +PREEMPT_RT does not report "serving softirq" because the tracing core +looks at the preemption counter while PREEMPT_RT does not update it +while processing softirqs in order to remain preemptible. The +information is stored somewhere else. +The in_serving_softirq() macro and the SOFTIRQ_OFFSET define are still +working but not on the preempt-counter. + +Use in_serving_softirq() macro which works on PREEMPT_RT. On !PREEMPT_RT +the compiler (gcc-10 / clang-11) is smart enough to optimize the +in_serving_softirq() related read of the preemption counter away. +The only difference I noticed by using in_serving_softirq() on +!PREEMPT_RT is that gcc-10 implemented tracing_gen_ctx_flags() as +reading FLAG, jmp _tracing_gen_ctx_flags(). Without in_serving_softirq() +it inlined _tracing_gen_ctx_flags() into tracing_gen_ctx_flags(). + +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + kernel/trace/trace.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -2596,8 +2596,7 @@ unsigned int _tracing_gen_ctx_flags(unsi + trace_flags |= TRACE_FLAG_NMI; + if (pc & HARDIRQ_MASK) + trace_flags |= TRACE_FLAG_HARDIRQ; +- +- if (pc & SOFTIRQ_OFFSET) ++ if (in_serving_softirq()) + trace_flags |= TRACE_FLAG_SOFTIRQ; + + if (tif_need_resched()) diff --git a/patches/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch b/patches/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch index 31d6de49670a..fca0cc1cd831 100644 --- a/patches/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch +++ b/patches/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch @@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -156,7 +156,7 @@ struct request { +@@ -153,7 +153,7 @@ struct request { */ union { 
struct hlist_node hash; /* merge hash */ diff --git a/patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch b/patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch new file mode 100644 index 000000000000..10e686b4bf29 --- /dev/null +++ b/patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch @@ -0,0 +1,32 @@ +From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +Date: Tue, 12 Jan 2021 21:01:38 +0100 +Subject: [PATCH 3/3] tracing: Remove NULL check from current in + tracing_generic_entry_update(). + +I can't imagine when or why `current' would return a NULL pointer. This +check was added in commit + 72829bc3d63cd ("ftrace: move enums to ftrace.h and make helper function global") + +but it doesn't give me hint why it was needed. + +Assume `current' never returns a NULL pointer and remove the check. + +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + include/linux/trace_events.h | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/include/linux/trace_events.h ++++ b/include/linux/trace_events.h +@@ -152,10 +152,8 @@ static inline void tracing_generic_entry + unsigned short type, + unsigned int trace_ctx) + { +- struct task_struct *tsk = current; +- + entry->preempt_count = trace_ctx & 0xff; +- entry->pid = (tsk) ? 
tsk->pid : 0; ++ entry->pid = current->pid; + entry->type = type; + entry->flags = trace_ctx >> 16; + } diff --git a/patches/0005-workqueue-Manually-break-affinity-on-hotplug.patch b/patches/0005-workqueue-Manually-break-affinity-on-hotplug.patch index c952d30c830c..91eefdd54c6b 100644 --- a/patches/0005-workqueue-Manually-break-affinity-on-hotplug.patch +++ b/patches/0005-workqueue-Manually-break-affinity-on-hotplug.patch @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/workqueue.c +++ b/kernel/workqueue.c -@@ -4908,6 +4908,10 @@ static void unbind_workers(int cpu) +@@ -4915,6 +4915,10 @@ static void unbind_workers(int cpu) pool->flags |= POOL_DISASSOCIATED; raw_spin_unlock_irq(&pool->lock); diff --git a/patches/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch b/patches/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch index a3f5e00e9a26..2529c5f76443 100644 --- a/patches/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch +++ b/patches/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch @@ -42,15 +42,15 @@ the approach. 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- - include/linux/rwsem-rt.h | 69 ++++++++++ - kernel/locking/rwsem-rt.c | 307 ++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 376 insertions(+) + include/linux/rwsem-rt.h | 70 ++++++++++ + kernel/locking/rwsem-rt.c | 318 ++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 388 insertions(+) create mode 100644 include/linux/rwsem-rt.h create mode 100644 kernel/locking/rwsem-rt.c --- /dev/null +++ b/include/linux/rwsem-rt.h -@@ -0,0 +1,69 @@ +@@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef _LINUX_RWSEM_RT_H +#define _LINUX_RWSEM_RT_H @@ -110,6 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +} + +extern void __down_read(struct rw_semaphore *sem); ++extern int __down_read_interruptible(struct rw_semaphore *sem); +extern int __down_read_killable(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); @@ -122,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +#endif --- /dev/null +++ b/kernel/locking/rwsem-rt.c -@@ -0,0 +1,307 @@ +@@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/rwsem.h> +#include <linux/sched/debug.h> @@ -289,6 +290,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + WARN_ON_ONCE(ret); +} + ++int __down_read_interruptible(struct rw_semaphore *sem) ++{ ++ int ret; ++ ++ ret = __down_read_common(sem, TASK_INTERRUPTIBLE); ++ if (likely(!ret)) ++ return ret; ++ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); ++ return -EINTR; ++} ++ +int __down_read_killable(struct rw_semaphore *sem) +{ + int ret; diff --git a/patches/0019-printk-remove-deferred-printing.patch b/patches/0019-printk-remove-deferred-printing.patch index 98462856621c..ed11afe427c8 100644 --- 
a/patches/0019-printk-remove-deferred-printing.patch +++ b/patches/0019-printk-remove-deferred-printing.patch @@ -422,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -} --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -9343,7 +9343,6 @@ void ftrace_dump(enum ftrace_dump_mode o +@@ -9329,7 +9329,6 @@ void ftrace_dump(enum ftrace_dump_mode o tracing_off(); local_irq_save(flags); @@ -430,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Simulate the iterator */ trace_init_global_iter(&iter); -@@ -9423,7 +9422,6 @@ void ftrace_dump(enum ftrace_dump_mode o +@@ -9409,7 +9408,6 @@ void ftrace_dump(enum ftrace_dump_mode o atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); diff --git a/patches/0020-locking-rtmutex-wire-up-RT-s-locking.patch b/patches/0020-locking-rtmutex-wire-up-RT-s-locking.patch index 5f280f20ea32..e575ccb1acd7 100644 --- a/patches/0020-locking-rtmutex-wire-up-RT-s-locking.patch +++ b/patches/0020-locking-rtmutex-wire-up-RT-s-locking.patch @@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include "lock_events.h" /* -@@ -1482,6 +1483,7 @@ static inline void __downgrade_write(str +@@ -1494,6 +1495,7 @@ static inline void __downgrade_write(str if (tmp & RWSEM_FLAG_WAITERS) rwsem_downgrade_wake(sem); } @@ -235,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * lock for reading -@@ -1617,7 +1619,9 @@ void down_read_non_owner(struct rw_semap +@@ -1657,7 +1659,9 @@ void down_read_non_owner(struct rw_semap { might_sleep(); __down_read(sem); @@ -245,7 +245,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } EXPORT_SYMBOL(down_read_non_owner); -@@ -1646,7 +1650,9 @@ EXPORT_SYMBOL(down_write_killable_nested +@@ -1686,7 +1690,9 @@ EXPORT_SYMBOL(down_write_killable_nested void up_read_non_owner(struct rw_semaphore *sem) { diff --git 
a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch index 355a181805fb..f4ecd3073c96 100644 --- a/patches/ftrace-migrate-disable-tracing.patch +++ b/patches/ftrace-migrate-disable-tracing.patch @@ -20,12 +20,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> }; #define TRACE_EVENT_TYPE_MAX \ -@@ -155,6 +156,7 @@ static inline void tracing_generic_entry - struct task_struct *tsk = current; - +@@ -153,6 +154,7 @@ static inline void tracing_generic_entry + unsigned int trace_ctx) + { entry->preempt_count = trace_ctx & 0xff; + entry->migrate_disable = (trace_ctx >> 8) & 0xff; - entry->pid = (tsk) ? tsk->pid : 0; + entry->pid = current->pid; entry->type = type; entry->flags = trace_ctx >> 16; --- a/kernel/trace/trace.c @@ -43,10 +43,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +#endif +} + - static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags) + unsigned int _tracing_gen_ctx_flags(unsigned long irqflags) { unsigned int trace_flags = 0; -@@ -2608,7 +2617,8 @@ static unsigned int __tracing_gen_ctx_fl +@@ -2603,7 +2612,8 @@ unsigned int _tracing_gen_ctx_flags(unsi trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; @@ -55,8 +55,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + (migration_disable_value() & 0xff) << 8; } - unsigned int _tracing_gen_ctx_flags(unsigned long irqflags) -@@ -3850,9 +3860,10 @@ static void print_lat_help_header(struct + unsigned int tracing_gen_ctx_flags(void) +@@ -3836,9 +3846,10 @@ static void print_lat_help_header(struct "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" @@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static void print_event_info(struct array_buffer *buf, struct seq_file *m) -@@ -3890,9 +3901,10 @@ static void print_func_help_header_irq(s +@@ -3876,9 +3887,10 @@ static void print_func_help_header_irq(s 
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); diff --git a/patches/localversion.patch b/patches/localversion.patch index cad0c1aa7aa9..619fa3040f83 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt23 ++-rt24 diff --git a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch index 195012a9164c..3e33bb83bf44 100644 --- a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch +++ b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -3045,9 +3045,9 @@ static void drain_local_pages_wq(struct +@@ -3047,9 +3047,9 @@ static void drain_local_pages_wq(struct * cpu which is allright but we also have to make sure to not move to * a different one. 
*/ diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch index f9933b7bbff2..c55d5664d1b1 100644 --- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -@@ -1541,11 +1549,11 @@ static void __free_pages_ok(struct page +@@ -1543,11 +1551,11 @@ static void __free_pages_ok(struct page return; migratetype = get_pfnblock_migratetype(page, pfn); @@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } void __free_pages_core(struct page *page, unsigned int order) -@@ -2958,13 +2966,13 @@ void drain_zone_pages(struct zone *zone, +@@ -2960,13 +2968,13 @@ void drain_zone_pages(struct zone *zone, int to_drain, batch; LIST_HEAD(dst); @@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (to_drain > 0) free_pcppages_bulk(zone, &dst, false); -@@ -2986,7 +2994,7 @@ static void drain_pages_zone(unsigned in +@@ -2988,7 +2996,7 @@ static void drain_pages_zone(unsigned in LIST_HEAD(dst); int count; @@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; -@@ -2994,7 +3002,7 @@ static void drain_pages_zone(unsigned in +@@ -2996,7 +3004,7 @@ static void drain_pages_zone(unsigned in if (count) isolate_pcp_pages(count, pcp, &dst); @@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (count) free_pcppages_bulk(zone, &dst, false); -@@ -3245,9 +3253,9 @@ void free_unref_page(struct page *page) +@@ -3247,9 +3255,9 @@ void free_unref_page(struct page *page) if (!free_unref_page_prepare(page, pfn)) return; @@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (!list_empty(&dst)) free_pcppages_bulk(zone, &dst, false); } -@@ -3274,7 +3282,7 @@ void 
free_unref_page_list(struct list_he +@@ -3276,7 +3284,7 @@ void free_unref_page_list(struct list_he set_page_private(page, pfn); } @@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> list_for_each_entry_safe(page, next, list, lru) { unsigned long pfn = page_private(page); enum zone_type type; -@@ -3289,12 +3297,12 @@ void free_unref_page_list(struct list_he +@@ -3291,12 +3299,12 @@ void free_unref_page_list(struct list_he * a large list of pages to free. */ if (++batch_count == SWAP_CLUSTER_MAX) { @@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> for (i = 0; i < __MAX_NR_ZONES; ) { struct page *page; -@@ -3464,7 +3472,7 @@ static struct page *rmqueue_pcplist(stru +@@ -3466,7 +3474,7 @@ static struct page *rmqueue_pcplist(stru struct page *page; unsigned long flags; @@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); -@@ -3472,7 +3480,7 @@ static struct page *rmqueue_pcplist(stru +@@ -3474,7 +3482,7 @@ static struct page *rmqueue_pcplist(stru __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); zone_statistics(preferred_zone, zone); } @@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return page; } -@@ -3506,7 +3514,8 @@ struct page *rmqueue(struct zone *prefer +@@ -3508,7 +3516,8 @@ struct page *rmqueue(struct zone *prefer * allocate greater than order-1 page units with __GFP_NOFAIL. 
*/ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); @@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> do { page = NULL; -@@ -3532,7 +3541,7 @@ struct page *rmqueue(struct zone *prefer +@@ -3534,7 +3543,7 @@ struct page *rmqueue(struct zone *prefer __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone); @@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> out: /* Separate test+clear to avoid unnecessary atomics */ -@@ -3545,7 +3554,7 @@ struct page *rmqueue(struct zone *prefer +@@ -3547,7 +3556,7 @@ struct page *rmqueue(struct zone *prefer return page; failed: @@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return NULL; } -@@ -8798,7 +8807,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8800,7 +8809,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -8807,7 +8816,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8809,7 +8818,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch index bf5eb8e61e48..cc77c74e3593 100644 --- a/patches/preempt-lazy-support.patch +++ b/patches/preempt-lazy-support.patch @@ -250,15 +250,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> }; #define TRACE_EVENT_TYPE_MAX \ -@@ -157,9 +158,10 @@ static inline void tracing_generic_entry - +@@ -155,9 +156,10 @@ static inline void tracing_generic_entry + { entry->preempt_count = trace_ctx & 0xff; entry->migrate_disable = (trace_ctx >> 8) & 0xff; + entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff; - entry->pid = (tsk) ? 
tsk->pid : 0; + entry->pid = current->pid; entry->type = type; - entry->flags = trace_ctx >> 16; -+ entry->flags = trace_ctx >> 24; ++ entry->flags = trace_ctx >> 24; } unsigned int _tracing_gen_ctx_flags(unsigned long irqflags); @@ -529,7 +529,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2617,8 +2617,11 @@ static unsigned int __tracing_gen_ctx_fl +@@ -2612,8 +2612,11 @@ unsigned int _tracing_gen_ctx_flags(unsi trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; @@ -542,8 +542,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + (trace_flags << 24); } - unsigned int _tracing_gen_ctx_flags(unsigned long irqflags) -@@ -3855,15 +3858,17 @@ unsigned long trace_total_entries(struct + unsigned int tracing_gen_ctx_flags(void) +@@ -3841,15 +3844,17 @@ unsigned long trace_total_entries(struct static void print_lat_help_header(struct seq_file *m) { @@ -570,7 +570,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static void print_event_info(struct array_buffer *buf, struct seq_file *m) -@@ -3897,14 +3902,16 @@ static void print_func_help_header_irq(s +@@ -3883,14 +3888,16 @@ static void print_func_help_header_irq(s print_event_info(buf, m); diff --git a/patches/series b/patches/series index 05560731ffd9..bcaaf08eac88 100644 --- a/patches/series +++ b/patches/series @@ -117,8 +117,10 @@ notifier-Make-atomic_notifiers-use-raw_spinlock.patch 0004-doc-Update-RCU-s-requirements-page-about-the-PREEMPT.patch 0005-doc-Use-CONFIG_PREEMPTION.patch -# 20201216172205.gvpizdw4kzpn326q@linutronix.de -tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch +# 20210112230057.2374308-1-bigeasy@linutronix.de +0001-tracing-Merge-irqflags-preempt-counter.patch +0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch +0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch ############################################################ 
# Ready for posting diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch index 2df68abe6dcf..9f5580cc5f83 100644 --- a/patches/softirq-disable-softirq-stacks-for-rt.patch +++ b/patches/softirq-disable-softirq-stacks-for-rt.patch @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c -@@ -769,10 +769,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most +@@ -724,10 +724,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most void *softirq_ctx[NR_CPUS] __read_mostly; void *hardirq_ctx[NR_CPUS] __read_mostly; diff --git a/patches/x86-Enable-RT-also-on-32bit.patch b/patches/x86-Enable-RT-also-on-32bit.patch index b00d91a3827e..96d7ecf5c9cd 100644 --- a/patches/x86-Enable-RT-also-on-32bit.patch +++ b/patches/x86-Enable-RT-also-on-32bit.patch @@ -9,7 +9,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -26,7 +26,6 @@ config X86_64 +@@ -27,7 +27,6 @@ config X86_64 # Options that are inherently 64-bit kernel only: select ARCH_HAS_GIGANTIC_PAGE select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_SOFT_DIRTY select MODULES_USE_ELF_RELA -@@ -94,6 +93,7 @@ config X86 +@@ -95,6 +94,7 @@ config X86 select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 diff --git a/patches/x86-Enable-RT.patch b/patches/x86-Enable-RT.patch index 85676261018a..87f17975b972 100644 --- a/patches/x86-Enable-RT.patch +++ b/patches/x86-Enable-RT.patch @@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -26,6 +26,7 @@ config X86_64 +@@ -27,6 +27,7 @@ config X86_64 # Options that are inherently 64-bit kernel only: select ARCH_HAS_GIGANTIC_PAGE select 
ARCH_SUPPORTS_INT128 if CC_HAS_INT128 diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch index 92a051a5e4a6..7531ffab076e 100644 --- a/patches/x86-preempt-lazy.patch +++ b/patches/x86-preempt-lazy.patch @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -211,6 +211,7 @@ config X86 +@@ -212,6 +212,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP |